/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/sched/mm.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK
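/*
 * Note (informational): FUTEX_TID_MASK bounds the TID value that fits in
 * the owner field of a robust futex word, so capping MAX_THREADS at it
 * keeps every thread ID representable there.
 */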

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

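/*
 * Sum the per-CPU fork counters. All *possible* CPUs are walked, so
 * counts contributed by CPUs that have since gone offline are included.
 */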
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}
#endif

void __weak arch_release_thread_stack(unsigned long *stack)
{
}

#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

#ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);

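/*
 * CPU-hotplug teardown callback (registered via cpuhp_setup_state() in
 * fork_init() below): release any stacks still cached on an outgoing CPU.
 */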
static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}
#endif

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_VMAP_STACK
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Clear stale pointers from reused stack. */
		memset(s->addr, 0, THREAD_SIZE);

		tsk->stack_vm_area = s;
		return s->addr;
	}

	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));

	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	if (stack)
		tsk->stack_vm_area = find_vm_area(stack);
	return stack;
#else
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
#endif
}

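/*
 * Free a task's stack. With CONFIG_VMAP_STACK, first try to park the
 * vm_struct in a free per-CPU cache slot (the cmpxchg makes this safe
 * against concurrent frees); fall back to vfree_atomic() only when the
 * cache is full.
 */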
static inline void free_thread_stack(struct task_struct *tsk)
{
#ifdef CONFIG_VMAP_STACK
	if (task_stack_vm_area(tsk)) {
		int i;

		for (i = 0; i < NR_CACHED_STACKS; i++) {
			if (this_cpu_cmpxchg(cached_stacks[i],
					NULL, tsk->stack_vm_area) != NULL)
				continue;

			return;
		}

		vfree_atomic(tsk->stack);
		return;
	}
#endif

	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;

static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
						  int node)
{
	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}

static void free_thread_stack(struct task_struct *tsk)
{
	kmem_cache_free(thread_stack_cache, tsk->stack);
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

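/*
 * Charge (account == 1) or uncharge (account == -1) a task's kernel stack
 * against the NR_KERNEL_STACK_KB zone counter and the owning memcg.
 * Illustrative arithmetic: with 16KB stacks and 4KB pages, each task adds
 * 4 x 4KB = 16 to NR_KERNEL_STACK_KB.
 */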
static void account_kernel_stack(struct task_struct *tsk, int account)
{
	void *stack = task_stack_page(tsk);
	struct vm_struct *vm = task_stack_vm_area(tsk);

	BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);

	if (vm) {
		int i;

		BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
			mod_zone_page_state(page_zone(vm->pages[i]),
					    NR_KERNEL_STACK_KB,
					    PAGE_SIZE / 1024 * account);
		}

		/* All stack pages belong to the same memcg. */
		mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	} else {
		/*
		 * All stack pages are in the same zone and belong to the
		 * same memcg.
		 */
		struct page *first_page = virt_to_page(stack);

		mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
				    THREAD_SIZE / 1024 * account);

		mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
				     account * (THREAD_SIZE / 1024));
	}
}

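/*
 * Actually release the stack once nothing can be using it any more:
 * reached from put_task_stack() when the stack refcount hits zero, or
 * directly from free_task() when thread_info still lives on the stack.
 */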
static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(tsk->state != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	account_kernel_stack(tsk, -1);
	arch_release_thread_stack(tsk->stack);
	free_thread_stack(tsk);
	tsk->stack = NULL;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = NULL;
#endif
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (atomic_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	put_seccomp_filter(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

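/*
 * Under CONFIG_MMU, dup_mmap() duplicates oldmm's address space into mm
 * for fork(): each VMA is copied (VM_DONTCOPY ones are skipped and
 * VM_WIPEONFORK ones get a clean slate), linked into the new list and
 * rbtree, and its page-table entries are copied so parent and child
 * share pages copy-on-write.
 */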
#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	LIST_HEAD(uf);

	uprobe_start_dup_mmap();
	if (down_write_killable(&oldmm->mmap_sem)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/* VM_WIPEONFORK gets a clean slate in the child. */
			tmp->anon_vma = NULL;
			if (anon_vma_prepare(tmp))
				goto fail_nomem_anon_vma_fork;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file_inode(file);
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			i_mmap_lock_write(mapping);
			if (tmp->vm_flags & VM_SHARED)
				atomic_inc(&mapping->i_mmap_writable);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
					&mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	/* a new mm has just been created */
	arch_dup_mmap(oldmm, mm);
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;
fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
	up_write(&oldmm->mmap_sem);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

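/*
 * Sanity-check an mm as it is finally freed: a non-zero RSS counter or
 * leftover page-table bytes at this point indicates a leak, so report it.
 */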
static void check_mm(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = atomic_long_read(&mm->rss_stat.count[i]);

		if (unlikely(x))
			printk(KERN_ALERT "BUG: Bad rss-counter state "
					  "mm:%p idx:%d val:%ld\n", mm, i, x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);
	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	hmm_mm_destroy(mm);
	mmu_notifier_mm_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

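/*
 * Deferred flavour of mmdrop(): __mmdrop() is not safe in softirq context
 * everywhere (see the comment in free_signal_struct() below), so the
 * final drop is queued to a workqueue instead.
 */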
static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (atomic_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	cgroup_free(tsk);
	task_numa_free(tsk);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads - compute the default limit on the number of threads
 */
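/*
 * Sizing note (illustrative): the memory-based bound below allows thread
 * stacks to consume at most 1/8th of RAM. For example, 1 GiB of RAM with
 * 16 KiB stacks yields 2^30 / (2^14 * 8) = 8192 threads.
 */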
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

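/*
 * With hardened usercopy, only an explicitly whitelisted region of the
 * task_struct slab may be copied to/from user space. Here that region is
 * the architecture's thread_struct (typically register/FPU state), offset
 * to its position within task_struct.
 */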
static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}

void __init fork_init(void)
{
	int i;
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);
#endif

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++) {
		init_user_ns.ucount_max[i] = max_threads/2;
	}

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	lockdep_init_task(&init_task);
}

int __weak arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

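/*
 * Allocate a task_struct plus kernel stack and initialize them as a copy
 * of @orig. This is only the first step of fork: the caller
 * (copy_process()) still has to fix up everything that must not be
 * shared between parent and child.
 */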
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	unsigned long *stack;
	struct vm_struct *stack_vm_area;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	stack = alloc_thread_stack_node(tsk, node);
	if (!stack)
		goto free_tsk;

	stack_vm_area = task_stack_vm_area(tsk);

	err = arch_dup_task_struct(tsk, orig);

	/*
	 * arch_dup_task_struct() clobbers the stack-related fields.  Make
	 * sure they're properly initialized before using any stack-related
	 * functions again.
	 */
	tsk->stack = stack;
#ifdef CONFIG_VMAP_STACK
	tsk->stack_vm_area = stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	atomic_set(&tsk->stack_refcount, 1);
#endif

	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;

	account_kernel_stack(tsk, 1);

	kcov_task_init(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

	return tsk;

free_stack:
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mm->mmap = NULL;
	mm->mm_rb = RB_ROOT;
	mm->vmacache_seqnum = 0;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	mm->pinned_vm = 0;
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_mm_init(mm);
	hmm_mm_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);

	if (current->mm) {
		mm->flags = current->mm->flags & MMF_INIT_MASK;
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	mm->user_ns = get_user_ns(user_ns);
	return mm;

fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

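/*
 * An mm carries two reference counts: mm_users counts users of the
 * address space, while mm_count counts references to the mm_struct
 * itself (e.g. from lazy-TLB kernel threads). __mmput() runs when
 * mm_users reaches zero: it tears the address space down and then drops
 * its mm_count reference, which frees the mm via __mmdrop() above.
 */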
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 962 | static inline void __mmput(struct mm_struct *mm) |
| 963 | { |
| 964 | VM_BUG_ON(atomic_read(&mm->mm_users)); |
| 965 | |
| 966 | uprobe_clear_state(mm); |
| 967 | exit_aio(mm); |
| 968 | ksm_exit(mm); |
| 969 | khugepaged_exit(mm); /* must run before exit_mmap */ |
| 970 | exit_mmap(mm); |
Aaron Lu | 6fcb52a | 2016-10-07 17:00:08 -0700 | [diff] [blame] | 971 | mm_put_huge_zero_page(mm); |
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 972 | set_mm_exe_file(mm, NULL); |
| 973 | if (!list_empty(&mm->mmlist)) { |
| 974 | spin_lock(&mmlist_lock); |
| 975 | list_del(&mm->mmlist); |
| 976 | spin_unlock(&mmlist_lock); |
| 977 | } |
| 978 | if (mm->binfmt) |
| 979 | module_put(mm->binfmt->module); |
| 980 | mmdrop(mm); |
| 981 | } |
| 982 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 983 | /* |
| 984 | * Decrement the use count and release all resources for an mm. |
| 985 | */ |
| 986 | void mmput(struct mm_struct *mm) |
| 987 | { |
Andrew Morton | 0ae26f1 | 2006-06-23 02:05:15 -0700 | [diff] [blame] | 988 | might_sleep(); |
| 989 | |
Michal Hocko | ec8d7c1 | 2016-05-20 16:57:21 -0700 | [diff] [blame] | 990 | if (atomic_dec_and_test(&mm->mm_users)) |
| 991 | __mmput(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | } |
| 993 | EXPORT_SYMBOL_GPL(mmput); |
| 994 | |
Sherry Yang | a1b2289 | 2017-10-03 16:15:00 -0700 | [diff] [blame] | 995 | #ifdef CONFIG_MMU |
| 996 | static void mmput_async_fn(struct work_struct *work) |
| 997 | { |
| 998 | struct mm_struct *mm = container_of(work, struct mm_struct, |
| 999 | async_put_work); |
| 1000 | |
| 1001 | __mmput(mm); |
| 1002 | } |
| 1003 | |
| 1004 | void mmput_async(struct mm_struct *mm) |
| 1005 | { |
| 1006 | if (atomic_dec_and_test(&mm->mm_users)) { |
| 1007 | INIT_WORK(&mm->async_put_work, mmput_async_fn); |
| 1008 | schedule_work(&mm->async_put_work); |
| 1009 | } |
| 1010 | } |
| 1011 | #endif |
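| | 
| | /*
| |  * Intended use of mmput_async() (a sketch inferred from the code
| |  * above, with a hypothetical "obj" holding the reference): unlike
| |  * mmput(), it never sleeps, so a caller in atomic context can drop
| |  * its reference and let the workqueue run __mmput() later:
| |  *
| |  *	spin_lock(&obj->lock);
| |  *	mmput_async(obj->mm);	safe under the lock, __mmput() is deferred
| |  *	obj->mm = NULL;
| |  *	spin_unlock(&obj->lock);
| |  */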
| 1012 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1013 | /** |
| 1014 | * set_mm_exe_file - change a reference to the mm's executable file |
| 1015 | * |
| 1016 | * This changes mm's executable file (shown as symlink /proc/[pid]/exe). |
| 1017 | * |
Davidlohr Bueso | 6e399cd | 2015-04-16 12:47:59 -0700 | [diff] [blame] | 1018 | * Main users are mmput() and sys_execve(). Callers prevent concurrent
| 1019 | * invocations: in mmput() nobody alive is left; in execve the task is
| 1020 | * single-threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set
| 1021 | * mm->exe_file, but does so without using set_mm_exe_file() in order
| 1022 | * to avoid the need for any locks.
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1023 | */ |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1024 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) |
| 1025 | { |
Davidlohr Bueso | 6e399cd | 2015-04-16 12:47:59 -0700 | [diff] [blame] | 1026 | struct file *old_exe_file; |
| 1027 | |
| 1028 | /* |
| 1029 | * It is safe to dereference the exe_file without RCU as |
| 1030 | * this function is only called if nobody else can access |
| 1031 | * this mm -- see comment above for justification. |
| 1032 | */ |
| 1033 | old_exe_file = rcu_dereference_raw(mm->exe_file); |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1034 | |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1035 | if (new_exe_file) |
| 1036 | get_file(new_exe_file); |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1037 | rcu_assign_pointer(mm->exe_file, new_exe_file); |
| 1038 | if (old_exe_file) |
| 1039 | fput(old_exe_file); |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1040 | } |
| 1041 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1042 | /** |
| 1043 | * get_mm_exe_file - acquire a reference to the mm's executable file |
| 1044 | * |
| 1045 | * Returns %NULL if mm has no associated executable file. |
| 1046 | * User must release file via fput(). |
| 1047 | */ |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1048 | struct file *get_mm_exe_file(struct mm_struct *mm) |
| 1049 | { |
| 1050 | struct file *exe_file; |
| 1051 | |
Konstantin Khlebnikov | 90f31d0 | 2015-04-16 12:47:56 -0700 | [diff] [blame] | 1052 | rcu_read_lock(); |
| 1053 | exe_file = rcu_dereference(mm->exe_file); |
| 1054 | if (exe_file && !get_file_rcu(exe_file)) |
| 1055 | exe_file = NULL; |
| 1056 | rcu_read_unlock(); |
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1057 | return exe_file; |
| 1058 | } |
Davidlohr Bueso | 1116334 | 2015-04-16 12:49:12 -0700 | [diff] [blame] | 1059 | EXPORT_SYMBOL(get_mm_exe_file); |
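| | 
| | /*
| |  * Minimal usage sketch (hypothetical caller): the reference returned
| |  * by get_mm_exe_file() must be dropped with fput() when done:
| |  *
| |  *	struct file *exe_file = get_mm_exe_file(mm);
| |  *	if (exe_file) {
| |  *		pr_info("exe: %pD\n", exe_file);
| |  *		fput(exe_file);
| |  *	}
| |  */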
Jiri Slaby | 3864601 | 2011-05-26 16:25:46 -0700 | [diff] [blame] | 1060 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | /** |
Mateusz Guzik | cd81a917 | 2016-08-23 16:20:38 +0200 | [diff] [blame] | 1062 | * get_task_exe_file - acquire a reference to the task's executable file |
| 1063 | * |
| 1064 | * Returns %NULL if the task's mm (if any) has no associated executable file,
| 1065 | * or if this is a kernel thread with a borrowed mm (see the comment above get_task_mm).
| 1066 | * User must release file via fput(). |
| 1067 | */ |
| 1068 | struct file *get_task_exe_file(struct task_struct *task) |
| 1069 | { |
| 1070 | struct file *exe_file = NULL; |
| 1071 | struct mm_struct *mm; |
| 1072 | |
| 1073 | task_lock(task); |
| 1074 | mm = task->mm; |
| 1075 | if (mm) { |
| 1076 | if (!(task->flags & PF_KTHREAD)) |
| 1077 | exe_file = get_mm_exe_file(mm); |
| 1078 | } |
| 1079 | task_unlock(task); |
| 1080 | return exe_file; |
| 1081 | } |
| 1082 | EXPORT_SYMBOL(get_task_exe_file); |
| 1083 | |
| 1084 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 | * get_task_mm - acquire a reference to the task's mm |
| 1086 | * |
Oleg Nesterov | 246bb0b | 2008-07-25 01:47:38 -0700 | [diff] [blame] | 1087 | * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | * this kernel thread has transiently adopted a user mm with use_mm,
| 1089 | * e.g. to do its AIO). Otherwise it returns a reference to the mm, after
| 1090 | * bumping up the use count. The user must release the mm via mmput()
| 1091 | * after use. Typically used by /proc and ptrace.
| 1092 | */ |
| 1093 | struct mm_struct *get_task_mm(struct task_struct *task) |
| 1094 | { |
| 1095 | struct mm_struct *mm; |
| 1096 | |
| 1097 | task_lock(task); |
| 1098 | mm = task->mm; |
| 1099 | if (mm) { |
Oleg Nesterov | 246bb0b | 2008-07-25 01:47:38 -0700 | [diff] [blame] | 1100 | if (task->flags & PF_KTHREAD) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1101 | mm = NULL; |
| 1102 | else |
Vegard Nossum | 3fce371 | 2017-02-27 14:30:10 -0800 | [diff] [blame] | 1103 | mmget(mm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | } |
| 1105 | task_unlock(task); |
| 1106 | return mm; |
| 1107 | } |
| 1108 | EXPORT_SYMBOL_GPL(get_task_mm); |
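| | 
| | /*
| |  * Typical caller pattern (illustrative, modelled on fs/proc users):
| |  * take the reference, do the work under mmap_sem, then mmput():
| |  *
| |  *	struct mm_struct *mm = get_task_mm(task);
| |  *	int nr_vmas;
| |  *
| |  *	if (!mm)
| |  *		return -ESRCH;
| |  *	down_read(&mm->mmap_sem);
| |  *	nr_vmas = mm->map_count;
| |  *	up_read(&mm->mmap_sem);
| |  *	mmput(mm);
| |  */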
| 1109 | |
Christopher Yeoh | 8cdb878 | 2012-02-02 11:34:09 +1030 | [diff] [blame] | 1110 | struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) |
| 1111 | { |
| 1112 | struct mm_struct *mm; |
| 1113 | int err; |
| 1114 | |
| 1115 | err = mutex_lock_killable(&task->signal->cred_guard_mutex); |
| 1116 | if (err) |
| 1117 | return ERR_PTR(err); |
| 1118 | |
| 1119 | mm = get_task_mm(task); |
| 1120 | if (mm && mm != current->mm && |
| 1121 | !ptrace_may_access(task, mode)) { |
| 1122 | mmput(mm); |
| 1123 | mm = ERR_PTR(-EACCES); |
| 1124 | } |
| 1125 | mutex_unlock(&task->signal->cred_guard_mutex); |
| 1126 | |
| 1127 | return mm; |
| 1128 | } |
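| | 
| | /*
| |  * Expected call-site shape (a fragment in the style of /proc/[pid]/mem):
| |  * mm_access() returns NULL when the task has no mm, an ERR_PTR when
| |  * access is denied, or a referenced mm the caller must mmput():
| |  *
| |  *	mm = mm_access(task, PTRACE_MODE_ATTACH_FSCREDS);
| |  *	if (IS_ERR_OR_NULL(mm))
| |  *		return mm ? PTR_ERR(mm) : -ESRCH;
| |  *	access_remote_vm(mm, addr, buf, len, FOLL_FORCE);
| |  *	mmput(mm);
| |  */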
| 1129 | |
Oleg Nesterov | 57b59c4 | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1130 | static void complete_vfork_done(struct task_struct *tsk) |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1131 | { |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1132 | struct completion *vfork; |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1133 | |
Oleg Nesterov | d68b46f | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1134 | task_lock(tsk); |
| 1135 | vfork = tsk->vfork_done; |
| 1136 | if (likely(vfork)) { |
| 1137 | tsk->vfork_done = NULL; |
| 1138 | complete(vfork); |
| 1139 | } |
| 1140 | task_unlock(tsk); |
| 1141 | } |
| 1142 | |
| 1143 | static int wait_for_vfork_done(struct task_struct *child, |
| 1144 | struct completion *vfork) |
| 1145 | { |
| 1146 | int killed; |
| 1147 | |
| 1148 | freezer_do_not_count(); |
| 1149 | killed = wait_for_completion_killable(vfork); |
| 1150 | freezer_count(); |
| 1151 | |
| 1152 | if (killed) { |
| 1153 | task_lock(child); |
| 1154 | child->vfork_done = NULL; |
| 1155 | task_unlock(child); |
| 1156 | } |
| 1157 | |
| 1158 | put_task_struct(child); |
| 1159 | return killed; |
Oleg Nesterov | c415c3b | 2012-03-05 14:59:13 -0800 | [diff] [blame] | 1160 | } |
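| | 
| | /*
| |  * This pairing is what makes vfork() block correctly (an editorial
| |  * sketch): the parent sleeps in wait_for_vfork_done() on a completion
| |  * that the child fires from mm_release() when it execs or exits, so
| |  * from userspace:
| |  *
| |  *	if (vfork() == 0) {
| |  *		execv(path, argv);
| |  *		_exit(127);	reached only if exec failed
| |  *	}
| |  *	the parent resumes here once the child execs or exits
| |  */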
| 1161 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | /* Please note the differences between mmput and mm_release.
| 1163 | * mmput is called whenever we stop holding onto a mm_struct,
| 1164 | * on error and success alike.
| 1165 | *
| 1166 | * mm_release is called after a mm_struct has been removed
| 1167 | * from the current process.
| 1168 | *
| 1169 | * This difference is important for error handling, when we
| 1170 | * only half set up a mm_struct for a new process and need to restore
| 1171 | * the old one: we mmput the new mm_struct before
| 1172 | * restoring the old one...
| 1173 | * Eric Biederman 10 January 1998
| 1174 | */ |
| 1175 | void mm_release(struct task_struct *tsk, struct mm_struct *mm) |
| 1176 | { |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1177 | /* Get rid of any futexes when releasing the mm */ |
| 1178 | #ifdef CONFIG_FUTEX |
Peter Zijlstra | fc6b177 | 2009-10-05 18:17:32 +0200 | [diff] [blame] | 1179 | if (unlikely(tsk->robust_list)) { |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1180 | exit_robust_list(tsk); |
Peter Zijlstra | fc6b177 | 2009-10-05 18:17:32 +0200 | [diff] [blame] | 1181 | tsk->robust_list = NULL; |
| 1182 | } |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1183 | #ifdef CONFIG_COMPAT |
Peter Zijlstra | fc6b177 | 2009-10-05 18:17:32 +0200 | [diff] [blame] | 1184 | if (unlikely(tsk->compat_robust_list)) { |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1185 | compat_exit_robust_list(tsk); |
Peter Zijlstra | fc6b177 | 2009-10-05 18:17:32 +0200 | [diff] [blame] | 1186 | tsk->compat_robust_list = NULL; |
| 1187 | } |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1188 | #endif |
Thomas Gleixner | 322a2c1 | 2009-10-05 18:18:03 +0200 | [diff] [blame] | 1189 | if (unlikely(!list_empty(&tsk->pi_state_list))) |
| 1190 | exit_pi_state_list(tsk); |
Linus Torvalds | 8141c7f | 2008-11-15 10:20:36 -0800 | [diff] [blame] | 1191 | #endif |
| 1192 | |
Srikar Dronamraju | 0326f5a9 | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1193 | uprobe_free_utask(tsk); |
| 1194 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | /* Get rid of any cached register state */ |
| 1196 | deactivate_mm(tsk, mm); |
| 1197 | |
Roland McGrath | fec1d01 | 2006-12-06 20:36:34 -0800 | [diff] [blame] | 1198 | /*
Michal Hocko | 735f277 | 2016-09-01 16:15:13 -0700 | [diff] [blame] | 1199 | * Signal userspace only if we're not exiting with a core dump:
| 1200 | * during a core dump we leave the value intact for debugging
| 1201 | * purposes.
Roland McGrath | fec1d01 | 2006-12-06 20:36:34 -0800 | [diff] [blame] | 1202 | */
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1203 | if (tsk->clear_child_tid) { |
Michal Hocko | 735f277 | 2016-09-01 16:15:13 -0700 | [diff] [blame] | 1204 | if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) && |
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1205 | atomic_read(&mm->mm_users) > 1) { |
| 1206 | /* |
| 1207 | * We don't check the error code - if userspace has |
| 1208 | * not set up a proper pointer then tough luck. |
| 1209 | */ |
| 1210 | put_user(0, tsk->clear_child_tid); |
Dominik Brodowski | 2de0db9 | 2018-03-11 11:34:26 +0100 | [diff] [blame] | 1211 | do_futex(tsk->clear_child_tid, FUTEX_WAKE, |
| 1212 | 1, NULL, NULL, 0, 0); |
Eric Dumazet | 9c8a822 | 2009-08-06 15:09:28 -0700 | [diff] [blame] | 1213 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | tsk->clear_child_tid = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | } |
Konstantin Khlebnikov | f7505d64 | 2012-05-31 16:26:21 -0700 | [diff] [blame] | 1216 | |
| 1217 | /* |
| 1218 | * All done, finally we can wake up the parent and return this mm to it.
| 1219 | * Also kthread_stop() uses this completion for synchronization. |
| 1220 | */ |
| 1221 | if (tsk->vfork_done) |
| 1222 | complete_vfork_done(tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | } |
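| | 
| | /*
| |  * Userspace view of the clear_child_tid handshake above (illustrative;
| |  * roughly what glibc's pthread_join() relies on): the kernel stores 0
| |  * to the registered address and issues a FUTEX_WAKE when the thread
| |  * exits, so a joiner can wait for the word to drop to zero:
| |  *
| |  *	while ((tid = __atomic_load_n(&child_tid, __ATOMIC_ACQUIRE)))
| |  *		syscall(SYS_futex, &child_tid, FUTEX_WAIT, tid, NULL, NULL, 0);
| |  */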
| 1224 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1225 | /* |
| 1226 | * Allocate a new mm structure and copy contents from the |
| 1227 | * mm structure of the passed in task structure. |
| 1228 | */ |
DaeSeok Youn | ff252c1 | 2014-01-23 15:55:46 -0800 | [diff] [blame] | 1229 | static struct mm_struct *dup_mm(struct task_struct *tsk) |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1230 | { |
| 1231 | struct mm_struct *mm, *oldmm = current->mm; |
| 1232 | int err; |
| 1233 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1234 | mm = allocate_mm(); |
| 1235 | if (!mm) |
| 1236 | goto fail_nomem; |
| 1237 | |
| 1238 | memcpy(mm, oldmm, sizeof(*mm)); |
| 1239 | |
Eric W. Biederman | bfedb58 | 2016-10-13 21:23:16 -0500 | [diff] [blame] | 1240 | if (!mm_init(mm, tsk, mm->user_ns)) |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1241 | goto fail_nomem; |
| 1242 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1243 | err = dup_mmap(mm, oldmm); |
| 1244 | if (err) |
| 1245 | goto free_pt; |
| 1246 | |
| 1247 | mm->hiwater_rss = get_mm_rss(mm); |
| 1248 | mm->hiwater_vm = mm->total_vm; |
| 1249 | |
Hiroshi Shimamoto | 801460d | 2009-09-23 15:57:41 -0700 | [diff] [blame] | 1250 | if (mm->binfmt && !try_module_get(mm->binfmt->module)) |
| 1251 | goto free_pt; |
| 1252 | |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1253 | return mm; |
| 1254 | |
| 1255 | free_pt: |
Hiroshi Shimamoto | 801460d | 2009-09-23 15:57:41 -0700 | [diff] [blame] | 1256 | /* don't put binfmt in mmput; we haven't taken the module reference yet */
| 1257 | mm->binfmt = NULL; |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1258 | mmput(mm); |
| 1259 | |
| 1260 | fail_nomem: |
| 1261 | return NULL; |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1262 | } |
| 1263 | |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1264 | static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1265 | { |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1266 | struct mm_struct *mm, *oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | int retval; |
| 1268 | |
| 1269 | tsk->min_flt = tsk->maj_flt = 0; |
| 1270 | tsk->nvcsw = tsk->nivcsw = 0; |
Mandeep Singh Baines | 17406b8 | 2009-02-06 15:37:47 -0800 | [diff] [blame] | 1271 | #ifdef CONFIG_DETECT_HUNG_TASK |
| 1272 | tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; |
| 1273 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | |
| 1275 | tsk->mm = NULL; |
| 1276 | tsk->active_mm = NULL; |
| 1277 | |
| 1278 | /* |
| 1279 | * Are we cloning a kernel thread? |
| 1280 | * |
| 1281 | * We need to steal an active VM for that.
| 1282 | */ |
| 1283 | oldmm = current->mm; |
| 1284 | if (!oldmm) |
| 1285 | return 0; |
| 1286 | |
Davidlohr Bueso | 615d6e8 | 2014-04-07 15:37:25 -0700 | [diff] [blame] | 1287 | /* initialize the new vmacache entries */ |
| 1288 | vmacache_flush(tsk); |
| 1289 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | if (clone_flags & CLONE_VM) { |
Vegard Nossum | 3fce371 | 2017-02-27 14:30:10 -0800 | [diff] [blame] | 1291 | mmget(oldmm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1292 | mm = oldmm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | goto good_mm; |
| 1294 | } |
| 1295 | |
| 1296 | retval = -ENOMEM; |
JANAK DESAI | a0a7ec3 | 2006-02-07 12:59:01 -0800 | [diff] [blame] | 1297 | mm = dup_mm(tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1298 | if (!mm) |
| 1299 | goto fail_nomem; |
| 1300 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | good_mm: |
| 1302 | tsk->mm = mm; |
| 1303 | tsk->active_mm = mm; |
| 1304 | return 0; |
| 1305 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | fail_nomem: |
| 1307 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | } |
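| | 
| | /*
| |  * Concrete consequence of the two branches above (example, not code
| |  * from this file): with CLONE_VM both tasks share one mm_struct, so a
| |  * store in the child is visible to the parent; without it, dup_mm()
| |  * hands the child a copy-on-write duplicate, so from userspace:
| |  *
| |  *	int x = 1;
| |  *	if (fork() == 0)
| |  *		x = 2;		the parent still reads x == 1
| |  *	a CLONE_VM clone() child writing x = 2 would be seen by the parent
| |  */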
| 1309 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1310 | static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | { |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1312 | struct fs_struct *fs = current->fs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | if (clone_flags & CLONE_FS) { |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1314 | /* tsk->fs is already what we want */ |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1315 | spin_lock(&fs->lock); |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1316 | if (fs->in_exec) { |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1317 | spin_unlock(&fs->lock); |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1318 | return -EAGAIN; |
| 1319 | } |
| 1320 | fs->users++; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 1321 | spin_unlock(&fs->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | return 0; |
| 1323 | } |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 1324 | tsk->fs = copy_fs_struct(fs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | if (!tsk->fs) |
| 1326 | return -ENOMEM; |
| 1327 | return 0; |
| 1328 | } |
| 1329 | |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1330 | static int copy_files(unsigned long clone_flags, struct task_struct *tsk) |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1331 | { |
| 1332 | struct files_struct *oldf, *newf; |
| 1333 | int error = 0; |
| 1334 | |
| 1335 | /* |
| 1336 | * A background process may not have any files ... |
| 1337 | */ |
| 1338 | oldf = current->files; |
| 1339 | if (!oldf) |
| 1340 | goto out; |
| 1341 | |
| 1342 | if (clone_flags & CLONE_FILES) { |
| 1343 | atomic_inc(&oldf->count); |
| 1344 | goto out; |
| 1345 | } |
| 1346 | |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 1347 | newf = dup_fd(oldf, &error); |
| 1348 | if (!newf) |
| 1349 | goto out; |
| 1350 | |
| 1351 | tsk->files = newf; |
| 1352 | error = 0; |
| 1353 | out: |
| 1354 | return error; |
| 1355 | } |
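| | 
| | /*
| |  * Effect in practice (sketch; path is hypothetical): with CLONE_FILES
| |  * the descriptor table is shared, so an open() or close() in either
| |  * task is seen by both; a plain fork() gets a dup_fd() copy and the
| |  * two tables then diverge:
| |  *
| |  *	int fd = open("/tmp/f", O_RDONLY);
| |  *	if (fork() == 0)
| |  *		close(fd);	child only: the parent's fd stays open
| |  */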
| 1356 | |
Jens Axboe | fadad878 | 2008-01-24 08:54:47 +0100 | [diff] [blame] | 1357 | static int copy_io(unsigned long clone_flags, struct task_struct *tsk) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1358 | { |
| 1359 | #ifdef CONFIG_BLOCK |
| 1360 | struct io_context *ioc = current->io_context; |
Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 1361 | struct io_context *new_ioc; |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1362 | |
| 1363 | if (!ioc) |
| 1364 | return 0; |
Jens Axboe | fadad878 | 2008-01-24 08:54:47 +0100 | [diff] [blame] | 1365 | /* |
| 1366 | * Share io context with parent, if CLONE_IO is set |
| 1367 | */ |
| 1368 | if (clone_flags & CLONE_IO) { |
Tejun Heo | 3d48749 | 2012-03-05 13:15:25 -0800 | [diff] [blame] | 1369 | ioc_task_link(ioc); |
| 1370 | tsk->io_context = ioc; |
Jens Axboe | fadad878 | 2008-01-24 08:54:47 +0100 | [diff] [blame] | 1371 | } else if (ioprio_valid(ioc->ioprio)) { |
Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 1372 | new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE); |
| 1373 | if (unlikely(!new_ioc)) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1374 | return -ENOMEM; |
| 1375 | |
Tejun Heo | 6e736be | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 1376 | new_ioc->ioprio = ioc->ioprio; |
Tejun Heo | 11a3122 | 2012-02-07 07:51:30 +0100 | [diff] [blame] | 1377 | put_io_context(new_ioc); |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1378 | } |
| 1379 | #endif |
| 1380 | return 0; |
| 1381 | } |
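| | 
| | /*
| |  * Effect in practice (sketch; macro names as in the kernel's
| |  * include/linux/ioprio.h): with CLONE_IO the child shares the parent's
| |  * io_context, so an ioprio_set() by either task affects both; without
| |  * it, only a valid ioprio value is copied into a fresh context:
| |  *
| |  *	syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
| |  *		IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
| |  */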
| 1382 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1383 | static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1384 | { |
| 1385 | struct sighand_struct *sig; |
| 1386 | |
Zhaolei | 6034880 | 2009-01-06 14:40:46 -0800 | [diff] [blame] | 1387 | if (clone_flags & CLONE_SIGHAND) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | atomic_inc(¤t->sighand->count); |
| 1389 | return 0; |
| 1390 | } |
| 1391 | sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); |
Ingo Molnar | e56d090 | 2006-01-08 01:01:37 -0800 | [diff] [blame] | 1392 | rcu_assign_pointer(tsk->sighand, sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | if (!sig) |
| 1394 | return -ENOMEM; |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 1395 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1396 | atomic_set(&sig->count, 1); |
| 1397 | memcpy(sig->action, current->sighand->action, sizeof(sig->action)); |
| 1398 | return 0; |
| 1399 | } |
| 1400 | |
Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 1401 | void __cleanup_sighand(struct sighand_struct *sighand) |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1402 | { |
Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 1403 | if (atomic_dec_and_test(&sighand->count)) { |
| 1404 | signalfd_cleanup(sighand); |
Oleg Nesterov | 392809b | 2014-09-28 23:44:18 +0200 | [diff] [blame] | 1405 | /* |
Paul E. McKenney | 5f0d5a3 | 2017-01-18 02:53:44 -0800 | [diff] [blame] | 1406 | * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it |
Oleg Nesterov | 392809b | 2014-09-28 23:44:18 +0200 | [diff] [blame] | 1407 | * without an RCU grace period, see __lock_task_sighand(). |
| 1408 | */ |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1409 | kmem_cache_free(sighand_cachep, sighand); |
Oleg Nesterov | d80e731 | 2012-02-24 20:07:11 +0100 | [diff] [blame] | 1410 | } |
Oleg Nesterov | c81addc | 2006-03-28 16:11:17 -0800 | [diff] [blame] | 1411 | } |
| 1412 | |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1413 | #ifdef CONFIG_POSIX_TIMERS |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1414 | /* |
| 1415 | * Initialize POSIX timer handling for a thread group. |
| 1416 | */ |
| 1417 | static void posix_cpu_timers_init_group(struct signal_struct *sig) |
| 1418 | { |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1419 | unsigned long cpu_limit; |
| 1420 | |
Jason Low | 316c1608d | 2015-04-28 13:00:20 -0700 | [diff] [blame] | 1421 | cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1422 | if (cpu_limit != RLIM_INFINITY) { |
Frederic Weisbecker | ebd7e7f | 2017-01-31 04:09:34 +0100 | [diff] [blame] | 1423 | sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC; |
Jason Low | d5c373e | 2015-10-14 12:07:55 -0700 | [diff] [blame] | 1424 | sig->cputimer.running = true; |
Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1425 | } |
| 1426 | |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1427 | /* The timer lists. */ |
| 1428 | INIT_LIST_HEAD(&sig->cpu_timers[0]); |
| 1429 | INIT_LIST_HEAD(&sig->cpu_timers[1]); |
| 1430 | INIT_LIST_HEAD(&sig->cpu_timers[2]); |
| 1431 | } |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1432 | #else |
| 1433 | static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { } |
| 1434 | #endif |
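| | 
| | /*
| |  * Worked example of the conversion above: an inherited RLIMIT_CPU soft
| |  * limit of 2 seconds becomes cputime_expires.prof_exp =
| |  * 2 * NSEC_PER_SEC = 2000000000 ns, and sig->cputimer is marked
| |  * running so the group's CPU clock is checked against that expiry.
| |  */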
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1435 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1436 | static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | { |
| 1438 | struct signal_struct *sig; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1439 | |
Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 1440 | if (clone_flags & CLONE_THREAD) |
Peter Zijlstra | 490dea4 | 2008-11-24 17:06:57 +0100 | [diff] [blame] | 1441 | return 0; |
Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1442 | |
Veaceslav Falico | a56704e | 2010-03-10 15:23:01 -0800 | [diff] [blame] | 1443 | sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1444 | tsk->signal = sig; |
| 1445 | if (!sig) |
| 1446 | return -ENOMEM; |
| 1447 | |
Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1448 | sig->nr_threads = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1449 | atomic_set(&sig->live, 1); |
Oleg Nesterov | b3ac022 | 2010-05-26 14:43:24 -0700 | [diff] [blame] | 1450 | atomic_set(&sig->sigcnt, 1); |
Oleg Nesterov | 0c740d0 | 2014-01-21 15:49:56 -0800 | [diff] [blame] | 1451 | |
| 1452 | /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ |
| 1453 | sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); |
| 1454 | tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); |
| 1455 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | init_waitqueue_head(&sig->wait_chldexit); |
Oleg Nesterov | db51aec | 2008-04-30 00:52:52 -0700 | [diff] [blame] | 1457 | sig->curr_target = tsk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1458 | init_sigpending(&sig->shared_pending); |
Rik van Riel | e78c349 | 2014-08-16 13:40:10 -0400 | [diff] [blame] | 1459 | seqlock_init(&sig->stats_lock); |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 1460 | prev_cputime_init(&sig->prev_cputime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | |
Nicolas Pitre | baa73d9 | 2016-11-11 00:10:10 -0500 | [diff] [blame] | 1462 | #ifdef CONFIG_POSIX_TIMERS |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1463 | INIT_LIST_HEAD(&sig->posix_timers); |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1464 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1465 | sig->real_timer.function = it_real_fn; |
Nicolas Pitre | baa73d9 | 2016-11-11 00:10:10 -0500 | [diff] [blame] | 1466 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1467 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | task_lock(current->group_leader); |
| 1469 | memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); |
| 1470 | task_unlock(current->group_leader); |
| 1471 | |
Oleg Nesterov | 6279a751 | 2009-03-27 01:06:07 +0100 | [diff] [blame] | 1472 | posix_cpu_timers_init_group(sig); |
| 1473 | |
Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1474 | tty_audit_fork(sig); |
Mike Galbraith | 5091faa | 2010-11-30 14:18:03 +0100 | [diff] [blame] | 1475 | sched_autogroup_fork(sig); |
Miloslav Trmac | 522ed77 | 2007-07-15 23:40:56 -0700 | [diff] [blame] | 1476 | |
David Rientjes | a63d83f | 2010-08-09 17:19:46 -0700 | [diff] [blame] | 1477 | sig->oom_score_adj = current->signal->oom_score_adj; |
Mandeep Singh Baines | dabb16f63 | 2011-01-13 15:46:05 -0800 | [diff] [blame] | 1478 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
KOSAKI Motohiro | 28b83c5 | 2009-09-21 17:03:13 -0700 | [diff] [blame] | 1479 | |
KOSAKI Motohiro | 9b1bf12 | 2010-10-27 15:34:08 -0700 | [diff] [blame] | 1480 | mutex_init(&sig->cred_guard_mutex); |
| 1481 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1482 | return 0; |
| 1483 | } |
| 1484 | |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1485 | static void copy_seccomp(struct task_struct *p) |
| 1486 | { |
| 1487 | #ifdef CONFIG_SECCOMP |
| 1488 | /* |
| 1489 | * Must be called with sighand->lock held, which is common to |
| 1490 | * all threads in the group. Holding cred_guard_mutex is not |
| 1491 | * needed because this new task is not yet running and cannot |
| 1492 | * be racing exec. |
| 1493 | */ |
Guenter Roeck | 69f6a34 | 2014-08-10 20:50:30 -0700 | [diff] [blame] | 1494 | assert_spin_locked(¤t->sighand->siglock); |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1495 | |
| 1496 | /* Ref-count the new filter user, and assign it. */ |
| 1497 | get_seccomp_filter(current); |
| 1498 | p->seccomp = current->seccomp; |
| 1499 | |
| 1500 | /* |
| 1501 | * Explicitly enable no_new_privs here in case it got set |
| 1502 | * between the task_struct being duplicated and holding the |
| 1503 | * sighand lock. The seccomp state and nnp must be in sync. |
| 1504 | */ |
| 1505 | if (task_no_new_privs(current)) |
| 1506 | task_set_no_new_privs(p); |
| 1507 | |
| 1508 | /* |
| 1509 | * If the parent gained a seccomp mode after copying thread |
| 1510 | * flags and before we held the sighand lock, we have
| 1511 | * to manually enable the seccomp thread flag here. |
| 1512 | */ |
| 1513 | if (p->seccomp.mode != SECCOMP_MODE_DISABLED) |
| 1514 | set_tsk_thread_flag(p, TIF_SECCOMP); |
| 1515 | #endif |
| 1516 | } |
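| | 
| | /*
| |  * Userspace consequence (sketch; "prog" is a hypothetical, already
| |  * populated struct sock_fprog): once a filter is installed together
| |  * with no_new_privs, every child created afterwards inherits both, so
| |  * there is no window in which a fork()ed child runs unconfined:
| |  *
| |  *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
| |  *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
| |  *	if (fork() == 0)
| |  *		execv(path, argv);	still runs under the filter
| |  */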
| 1517 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 1518 | SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1519 | { |
| 1520 | current->clear_child_tid = tidptr; |
| 1521 | |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1522 | return task_pid_vnr(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | } |
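| | 
| | /*
| |  * Example invocation (illustrative): C libraries typically issue this
| |  * once per thread at startup; the return value is the caller's TID as
| |  * seen from its own pid namespace (task_pid_vnr() above):
| |  *
| |  *	pid_t tid = syscall(SYS_set_tid_address, &child_tid);
| |  */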
| 1524 | |
Alexey Dobriyan | a39bc51 | 2007-10-18 23:41:10 -0700 | [diff] [blame] | 1525 | static void rt_mutex_init_task(struct task_struct *p) |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1526 | { |
Thomas Gleixner | 1d61548 | 2009-11-17 14:54:03 +0100 | [diff] [blame] | 1527 | raw_spin_lock_init(&p->pi_lock); |
Zilvinas Valinskas | e29e175 | 2007-03-16 13:38:34 -0800 | [diff] [blame] | 1528 | #ifdef CONFIG_RT_MUTEXES |
Davidlohr Bueso | a23ba90 | 2017-09-08 16:15:01 -0700 | [diff] [blame] | 1529 | p->pi_waiters = RB_ROOT_CACHED; |
Xunlei Pang | e96a7705 | 2017-03-23 15:56:08 +0100 | [diff] [blame] | 1530 | p->pi_top_task = NULL; |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1531 | p->pi_blocked_on = NULL; |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 1532 | #endif |
| 1533 | } |
| 1534 | |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1535 | #ifdef CONFIG_POSIX_TIMERS |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | /* |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1537 | * Initialize POSIX timer handling for a single task. |
| 1538 | */ |
| 1539 | static void posix_cpu_timers_init(struct task_struct *tsk) |
| 1540 | { |
Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1541 | tsk->cputime_expires.prof_exp = 0; |
| 1542 | tsk->cputime_expires.virt_exp = 0; |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1543 | tsk->cputime_expires.sched_exp = 0; |
| 1544 | INIT_LIST_HEAD(&tsk->cpu_timers[0]); |
| 1545 | INIT_LIST_HEAD(&tsk->cpu_timers[1]); |
| 1546 | INIT_LIST_HEAD(&tsk->cpu_timers[2]); |
| 1547 | } |
Nicolas Pitre | b18b6a9 | 2017-01-21 00:09:08 -0500 | [diff] [blame] | 1548 | #else |
| 1549 | static inline void posix_cpu_timers_init(struct task_struct *tsk) { } |
| 1550 | #endif |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1551 | |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1552 | static inline void |
| 1553 | init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) |
| 1554 | { |
| 1555 | task->pids[type].pid = pid; |
| 1556 | } |
| 1557 | |
Ingo Molnar | 6bfbaa5 | 2017-02-03 21:37:49 +0100 | [diff] [blame] | 1558 | static inline void rcu_copy_process(struct task_struct *p) |
| 1559 | { |
| 1560 | #ifdef CONFIG_PREEMPT_RCU |
| 1561 | p->rcu_read_lock_nesting = 0; |
| 1562 | p->rcu_read_unlock_special.s = 0; |
| 1563 | p->rcu_blocked_node = NULL; |
| 1564 | INIT_LIST_HEAD(&p->rcu_node_entry); |
| 1565 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
| 1566 | #ifdef CONFIG_TASKS_RCU |
| 1567 | p->rcu_tasks_holdout = false; |
| 1568 | INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); |
| 1569 | p->rcu_tasks_idle_cpu = -1; |
| 1570 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
| 1571 | } |
| 1572 | |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1573 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | * This creates a new process as a copy of the old one, |
| 1575 | * but does not actually start it yet. |
| 1576 | * |
| 1577 | * It copies the registers, and all the appropriate |
| 1578 | * parts of the process environment (as per the clone |
| 1579 | * flags). The actual kick-off is left to the caller. |
| 1580 | */ |
Emese Revfy | 0766f78 | 2016-06-20 20:42:34 +0200 | [diff] [blame] | 1581 | static __latent_entropy struct task_struct *copy_process( |
| 1582 | unsigned long clone_flags, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1583 | unsigned long stack_start, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1584 | unsigned long stack_size, |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1585 | int __user *child_tidptr, |
Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 1586 | struct pid *pid, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 1587 | int trace, |
Andi Kleen | 725fc62 | 2016-05-23 16:24:05 -0700 | [diff] [blame] | 1588 | unsigned long tls, |
| 1589 | int node) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | { |
| 1591 | int retval; |
Mariusz Kozlowski | a24efe6 | 2007-10-18 23:41:09 -0700 | [diff] [blame] | 1592 | struct task_struct *p; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | |
Marcos Paulo de Souza | 667b609 | 2018-02-06 15:39:34 -0800 | [diff] [blame] | 1594 | /* |
| 1595 | * Don't allow sharing the root directory with processes in a different |
| 1596 | * namespace |
| 1597 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) |
| 1599 | return ERR_PTR(-EINVAL); |
| 1600 | |
Eric W. Biederman | e66eded | 2013-03-13 11:51:49 -0700 | [diff] [blame] | 1601 | if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) |
| 1602 | return ERR_PTR(-EINVAL); |
| 1603 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | /* |
| 1605 | * Thread groups must share signals as well, and detached threads |
| 1606 | * can only be started up within the thread group. |
| 1607 | */ |
| 1608 | if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)) |
| 1609 | return ERR_PTR(-EINVAL); |
| 1610 | |
| 1611 | /* |
| 1612 | * Shared signal handlers imply shared VM. By way of the above, |
| 1613 | * thread groups also imply shared VM. Blocking this case allows |
| 1614 | * for various simplifications in other code. |
| 1615 | */ |
| 1616 | if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)) |
| 1617 | return ERR_PTR(-EINVAL); |
| 1618 | |
Sukadev Bhattiprolu | 123be07 | 2009-09-23 15:57:20 -0700 | [diff] [blame] | 1619 | /* |
| 1620 | * Siblings of global init remain as zombies on exit since they are |
| 1621 | * not reaped by their parent (swapper). To solve this and to avoid |
| 1622 | * multi-rooted process trees, prevent global and container-inits |
| 1623 | * from creating siblings. |
| 1624 | */ |
| 1625 | if ((clone_flags & CLONE_PARENT) && |
| 1626 | current->signal->flags & SIGNAL_UNKILLABLE) |
| 1627 | return ERR_PTR(-EINVAL); |
| 1628 | |
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1629 | /* |
Oleg Nesterov | 40a0d32 | 2013-09-11 14:19:41 -0700 | [diff] [blame] | 1630 | * If the new process will be in a different pid or user namespace,
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 1631 | * do not allow it to share a thread group with the forking task.
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1632 | */ |
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 1633 | if (clone_flags & CLONE_THREAD) { |
Oleg Nesterov | 40a0d32 | 2013-09-11 14:19:41 -0700 | [diff] [blame] | 1634 | if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) || |
| 1635 | (task_active_pid_ns(current) != |
| 1636 | current->nsproxy->pid_ns_for_children)) |
| 1637 | return ERR_PTR(-EINVAL); |
| 1638 | } |
Eric W. Biederman | 8382fca | 2012-12-20 19:26:06 -0800 | [diff] [blame] | 1639 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | retval = -ENOMEM; |
Andi Kleen | 725fc62 | 2016-05-23 16:24:05 -0700 | [diff] [blame] | 1641 | p = dup_task_struct(current, node); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | if (!p) |
| 1643 | goto fork_out; |
| 1644 | |
Vegard Nossum | 4d6501d | 2017-05-09 09:39:59 +0200 | [diff] [blame] | 1645 | /* |
| 1646 | * This _must_ happen before we call free_task(), i.e. before we jump |
| 1647 | * to any of the bad_fork_* labels. This is to avoid freeing |
| 1648 | * p->set_child_tid which is (ab)used as a kthread's data pointer for |
| 1649 | * kernel threads (PF_KTHREAD). |
| 1650 | */ |
| 1651 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
| 1652 | /* |
| 1653 | * Clear TID on mm_release()? |
| 1654 | */ |
| 1655 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; |
| 1656 | |
Steven Rostedt | f7e8b61 | 2009-06-02 16:39:48 -0400 | [diff] [blame] | 1657 | ftrace_graph_init_task(p); |
| 1658 | |
Peter Zijlstra | bea493a | 2006-10-17 00:10:33 -0700 | [diff] [blame] | 1659 | rt_mutex_init_task(p); |
| 1660 | |
Ingo Molnar | d12c1a3 | 2008-07-14 12:09:28 +0200 | [diff] [blame] | 1661 | #ifdef CONFIG_PROVE_LOCKING |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1662 | DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); |
| 1663 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
| 1664 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1665 | retval = -EAGAIN; |
David Howells | 3b11a1d | 2008-11-14 10:39:26 +1100 | [diff] [blame] | 1666 | if (atomic_read(&p->real_cred->user->processes) >= |
Jiri Slaby | 78d7d40 | 2010-03-05 13:42:54 -0800 | [diff] [blame] | 1667 | task_rlimit(p, RLIMIT_NPROC)) { |
Eric Paris | b57922b | 2013-07-03 15:08:29 -0700 | [diff] [blame] | 1668 | if (p->real_cred->user != INIT_USER && |
| 1669 | !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1670 | goto bad_fork_free; |
| 1671 | } |
Vasiliy Kulikov | 72fa599 | 2011-08-08 19:02:04 +0400 | [diff] [blame] | 1672 | current->flags &= ~PF_NPROC_EXCEEDED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | |
David Howells | f1752ee | 2008-11-14 10:39:17 +1100 | [diff] [blame] | 1674 | retval = copy_creds(p, clone_flags); |
| 1675 | if (retval < 0) |
| 1676 | goto bad_fork_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1677 | |
| 1678 | /* |
| 1679 | * If multiple threads are within copy_process(), then this check |
| 1680 | * triggers too late. This doesn't hurt, the check is only there |
| 1681 | * to stop root fork bombs. |
| 1682 | */ |
Li Zefan | 04ec93f | 2009-02-06 08:17:19 +0000 | [diff] [blame] | 1683 | retval = -EAGAIN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1684 | if (nr_threads >= max_threads) |
| 1685 | goto bad_fork_cleanup_count; |
| 1686 | |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 1687 | delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ |
Peter Zijlstra | c1de45c | 2016-11-28 23:03:05 -0800 | [diff] [blame] | 1688 | p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE); |
David Rientjes | 514ddb4 | 2014-04-07 15:37:27 -0700 | [diff] [blame] | 1689 | p->flags |= PF_FORKNOEXEC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | INIT_LIST_HEAD(&p->children); |
| 1691 | INIT_LIST_HEAD(&p->sibling); |
Paul E. McKenney | f41d911 | 2009-08-22 13:56:52 -0700 | [diff] [blame] | 1692 | rcu_copy_process(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1693 | p->vfork_done = NULL; |
| 1694 | spin_lock_init(&p->alloc_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1696 | init_sigpending(&p->pending); |
| 1697 | |
Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1698 | p->utime = p->stime = p->gtime = 0; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 1699 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME |
Martin Schwidefsky | 6486163 | 2011-12-15 14:56:09 +0100 | [diff] [blame] | 1700 | p->utimescaled = p->stimescaled = 0; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 1701 | #endif |
Peter Zijlstra | 9d7fb04 | 2015-06-30 11:30:54 +0200 | [diff] [blame] | 1702 | prev_cputime_init(&p->prev_cputime); |
| 1703 | |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 1704 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
Frederic Weisbecker | bac5b6b | 2017-06-29 19:15:10 +0200 | [diff] [blame] | 1705 | seqcount_init(&p->vtime.seqcount); |
| 1706 | p->vtime.starttime = 0; |
| 1707 | p->vtime.state = VTIME_INACTIVE; |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 1708 | #endif |
| 1709 | |
KAMEZAWA Hiroyuki | a3a2e76 | 2010-04-06 14:34:42 -0700 | [diff] [blame] | 1710 | #if defined(SPLIT_RSS_COUNTING) |
| 1711 | memset(&p->rss_stat, 0, sizeof(p->rss_stat)); |
| 1712 | #endif |
Balbir Singh | 172ba84 | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 1713 | |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1714 | p->default_timer_slack_ns = current->timer_slack_ns; |
| 1715 | |
Andrea Righi | 5995477 | 2008-07-27 17:29:15 +0200 | [diff] [blame] | 1716 | task_io_accounting_init(&p->ioac); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | acct_clear_integrals(p); |
| 1718 | |
Frank Mayhar | f06febc | 2008-09-12 09:54:39 -0700 | [diff] [blame] | 1719 | posix_cpu_timers_init(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | |
Thomas Gleixner | ccbf62d | 2014-07-16 21:04:34 +0000 | [diff] [blame] | 1721 | p->start_time = ktime_get_ns(); |
Thomas Gleixner | 57e0be0 | 2014-07-16 21:04:32 +0000 | [diff] [blame] | 1722 | p->real_start_time = ktime_get_boot_ns(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1723 | p->io_context = NULL; |
Richard Guy Briggs | c0b0ae8 | 2018-05-12 21:58:21 -0400 | [diff] [blame] | 1724 | audit_set_context(p, NULL); |
Paul Menage | b4f48b6 | 2007-10-18 23:39:33 -0700 | [diff] [blame] | 1725 | cgroup_fork(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 | #ifdef CONFIG_NUMA |
Lee Schermerhorn | 846a16b | 2008-04-28 02:13:09 -0700 | [diff] [blame] | 1727 | p->mempolicy = mpol_dup(p->mempolicy); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1728 | if (IS_ERR(p->mempolicy)) { |
| 1729 | retval = PTR_ERR(p->mempolicy); |
| 1730 | p->mempolicy = NULL; |
Li Zefan | e8604cb | 2014-03-28 15:18:27 +0800 | [diff] [blame] | 1731 | goto bad_fork_cleanup_threadgroup_lock; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1732 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | #endif |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 1734 | #ifdef CONFIG_CPUSETS |
| 1735 | p->cpuset_mem_spread_rotor = NUMA_NO_NODE; |
| 1736 | p->cpuset_slab_spread_rotor = NUMA_NO_NODE; |
Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 1737 | seqcount_init(&p->mems_allowed_seq); |
Michal Hocko | 778d3b0 | 2011-07-26 16:08:30 -0700 | [diff] [blame] | 1738 | #endif |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 1739 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 1740 | p->irq_events = 0; |
| 1741 | p->hardirqs_enabled = 0; |
| 1742 | p->hardirq_enable_ip = 0; |
| 1743 | p->hardirq_enable_event = 0; |
| 1744 | p->hardirq_disable_ip = _THIS_IP_; |
| 1745 | p->hardirq_disable_event = 0; |
| 1746 | p->softirqs_enabled = 1; |
| 1747 | p->softirq_enable_ip = _THIS_IP_; |
| 1748 | p->softirq_enable_event = 0; |
| 1749 | p->softirq_disable_ip = 0; |
| 1750 | p->softirq_disable_event = 0; |
| 1751 | p->hardirq_context = 0; |
| 1752 | p->softirq_context = 0; |
| 1753 | #endif |
David Hildenbrand | 8bcbde5 | 2015-05-11 17:52:06 +0200 | [diff] [blame] | 1754 | |
| 1755 | p->pagefault_disabled = 0; |
| 1756 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1757 | #ifdef CONFIG_LOCKDEP |
| 1758 | p->lockdep_depth = 0; /* no locks held yet */ |
| 1759 | p->curr_chain_key = 0; |
| 1760 | p->lockdep_recursion = 0; |
Byungchul Park | b09be67 | 2017-08-07 16:12:52 +0900 | [diff] [blame] | 1761 | lockdep_init_task(p); |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 1762 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | |
Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 1764 | #ifdef CONFIG_DEBUG_MUTEXES |
| 1765 | p->blocked_on = NULL; /* not blocked yet */ |
| 1766 | #endif |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 1767 | #ifdef CONFIG_BCACHE |
| 1768 | p->sequential_io = 0; |
| 1769 | p->sequential_io_avg = 0; |
| 1770 | #endif |
Markus Metzger | 0f48140 | 2009-04-03 16:43:48 +0200 | [diff] [blame] | 1771 | |
Srivatsa Vaddagiri | 3c90e6e | 2007-11-09 22:39:39 +0100 | [diff] [blame] | 1772 | /* Perform scheduler-related setup. Assign this task to a CPU. */
Dario Faggioli | aab03e0 | 2013-11-28 11:14:43 +0100 | [diff] [blame] | 1773 | retval = sched_fork(clone_flags, p); |
| 1774 | if (retval) |
| 1775 | goto bad_fork_cleanup_policy; |
Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 1776 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1777 | retval = perf_event_init_task(p); |
Peter Zijlstra | 6ab423e | 2009-05-25 14:45:27 +0200 | [diff] [blame] | 1778 | if (retval) |
| 1779 | goto bad_fork_cleanup_policy; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1780 | retval = audit_alloc(p); |
| 1781 | if (retval) |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 1782 | goto bad_fork_cleanup_perf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1783 | /* copy all the process information */ |
Jack Miller | ab602f7 | 2014-08-08 14:23:19 -0700 | [diff] [blame] | 1784 | shm_init_task(p); |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 1785 | retval = security_task_alloc(p, clone_flags); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1786 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1787 | goto bad_fork_cleanup_audit; |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 1788 | retval = copy_semundo(clone_flags, p); |
| 1789 | if (retval) |
| 1790 | goto bad_fork_cleanup_security; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1791 | retval = copy_files(clone_flags, p); |
| 1792 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | goto bad_fork_cleanup_semundo; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1794 | retval = copy_fs(clone_flags, p); |
| 1795 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | goto bad_fork_cleanup_files; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1797 | retval = copy_sighand(clone_flags, p); |
| 1798 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | goto bad_fork_cleanup_fs; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1800 | retval = copy_signal(clone_flags, p); |
| 1801 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | goto bad_fork_cleanup_sighand; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1803 | retval = copy_mm(clone_flags, p); |
| 1804 | if (retval) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1805 | goto bad_fork_cleanup_signal; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1806 | retval = copy_namespaces(clone_flags, p); |
| 1807 | if (retval) |
David Howells | d84f4f9 | 2008-11-14 10:39:23 +1100 | [diff] [blame] | 1808 | goto bad_fork_cleanup_mm; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1809 | retval = copy_io(clone_flags, p); |
| 1810 | if (retval) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1811 | goto bad_fork_cleanup_namespaces; |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 1812 | retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1813 | if (retval) |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1814 | goto bad_fork_cleanup_io; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1816 | if (pid != &init_struct_pid) { |
Andy Lutomirski | c2b1df2 | 2013-08-22 11:39:16 -0700 | [diff] [blame] | 1817 | pid = alloc_pid(p->nsproxy->pid_ns_for_children); |
Michal Hocko | 35f71bc | 2015-04-16 12:47:38 -0700 | [diff] [blame] | 1818 | if (IS_ERR(pid)) { |
| 1819 | retval = PTR_ERR(pid); |
Jiri Slaby | 0740aa5 | 2016-05-20 17:00:25 -0700 | [diff] [blame] | 1820 | goto bad_fork_cleanup_thread; |
Michal Hocko | 35f71bc | 2015-04-16 12:47:38 -0700 | [diff] [blame] | 1821 | } |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1822 | } |
| 1823 | |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1824 | #ifdef CONFIG_BLOCK |
| 1825 | p->plug = NULL; |
| 1826 | #endif |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1827 | #ifdef CONFIG_FUTEX |
Ingo Molnar | 8f17d3a | 2006-03-27 01:16:27 -0800 | [diff] [blame] | 1828 | p->robust_list = NULL; |
| 1829 | #ifdef CONFIG_COMPAT |
| 1830 | p->compat_robust_list = NULL; |
| 1831 | #endif |
Ingo Molnar | c87e283 | 2006-06-27 02:54:58 -0700 | [diff] [blame] | 1832 | INIT_LIST_HEAD(&p->pi_state_list); |
| 1833 | p->pi_state_cache = NULL; |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1834 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | /* |
GOTO Masanori | f9a3879 | 2006-03-13 21:20:44 -0800 | [diff] [blame] | 1836 | * sigaltstack should be cleared when sharing the same VM |
| 1837 | */ |
| 1838 | if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) |
Stas Sergeev | 2a74213 | 2016-04-14 23:20:04 +0300 | [diff] [blame] | 1839 | sas_ss_reset(p); |
GOTO Masanori | f9a3879 | 2006-03-13 21:20:44 -0800 | [diff] [blame] | 1840 | |
| 1841 | /* |
Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 1842 | * Syscall tracing and stepping should be turned off in the |
| 1843 | * child regardless of CLONE_PTRACE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | */ |
Oleg Nesterov | 6580807 | 2009-12-15 16:47:16 -0800 | [diff] [blame] | 1845 | user_disable_single_step(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1846 | clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE); |
Laurent Vivier | ed75e8d | 2005-09-03 15:57:18 -0700 | [diff] [blame] | 1847 | #ifdef TIF_SYSCALL_EMU |
| 1848 | clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); |
| 1849 | #endif |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1850 | clear_all_latency_tracing(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1852 | /* OK, now we should be set up. */
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 1853 | p->pid = pid_nr(pid); |
| 1854 | if (clone_flags & CLONE_THREAD) { |
Oleg Nesterov | 5f8aadd | 2012-03-14 19:55:38 +0100 | [diff] [blame] | 1855 | p->exit_signal = -1; |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 1856 | p->group_leader = current->group_leader; |
| 1857 | p->tgid = current->tgid; |
| 1858 | } else { |
| 1859 | if (clone_flags & CLONE_PARENT) |
| 1860 | p->exit_signal = current->group_leader->exit_signal; |
| 1861 | else |
| 1862 | p->exit_signal = (clone_flags & CSIGNAL); |
| 1863 | p->group_leader = p; |
| 1864 | p->tgid = p->pid; |
| 1865 | } |
Oleg Nesterov | 5f8aadd | 2012-03-14 19:55:38 +0100 | [diff] [blame] | 1866 | |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1867 | p->nr_dirtied = 0; |
| 1868 | p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); |
Wu Fengguang | 8371235 | 2011-06-11 19:25:42 -0600 | [diff] [blame] | 1869 | p->dirty_paused_when = 0; |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1870 | |
Oleg Nesterov | bb8cbbf | 2013-11-13 15:36:12 +0100 | [diff] [blame] | 1871 | p->pdeath_signal = 0; |
Oleg Nesterov | 47e6532 | 2006-03-28 16:11:25 -0800 | [diff] [blame] | 1872 | INIT_LIST_HEAD(&p->thread_group); |
Al Viro | 158e164 | 2012-06-27 09:24:13 +0400 | [diff] [blame] | 1873 | p->task_works = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | |
Ingo Molnar | 780de9d | 2017-02-02 11:50:56 +0100 | [diff] [blame] | 1875 | cgroup_threadgroup_change_begin(current); |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 1876 | /* |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 1877 | * Ensure that the cgroup subsystem policies allow the new process to be |
| 1878 | * forked. It should be noted that the new process's css_set can be changed
| 1879 | * between here and cgroup_post_fork() if an organisation operation is in |
| 1880 | * progress. |
| 1881 | */ |
Oleg Nesterov | b53202e | 2015-12-03 10:24:08 -0500 | [diff] [blame] | 1882 | retval = cgroup_can_fork(p); |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 1883 | if (retval) |
| 1884 | goto bad_fork_free_pid; |
| 1885 | |
| 1886 | /* |
Oleg Nesterov | 18c830d | 2013-07-03 15:08:32 -0700 | [diff] [blame] | 1887 | * Make it visible to the rest of the system, but dont wake it up yet. |
| 1888 | * Need tasklist lock for parent etc handling! |
| 1889 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1890 | write_lock_irq(&tasklist_lock); |
| 1891 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1892 | /* CLONE_PARENT re-uses the old parent */ |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1893 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1894 | p->real_parent = current->real_parent; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1895 | p->parent_exec_id = current->parent_exec_id; |
| 1896 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1897 | p->real_parent = current; |
Oleg Nesterov | 2d5516c | 2009-03-02 22:58:45 +0100 | [diff] [blame] | 1898 | p->parent_exec_id = current->self_exec_id; |
| 1899 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1900 | |
Josh Poimboeuf | d83a7cb | 2017-02-13 19:42:40 -0600 | [diff] [blame] | 1901 | klp_copy_process(p); |
| 1902 | |
Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 1903 | spin_lock(¤t->sighand->siglock); |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1904 | |
| 1905 | /* |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1906 | * Copy seccomp details explicitly here, in case they were changed |
| 1907 | * before we took the sighand lock. |
| 1908 | */ |
| 1909 | copy_seccomp(p); |
| 1910 | |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1911 | rseq_fork(p, clone_flags); |
| 1912 | |
Kees Cook | dbd95212 | 2014-06-27 15:18:48 -0700 | [diff] [blame] | 1913 | /* |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1914 | * Process group and session signals need to be delivered to just the |
| 1915 | * parent before the fork or both the parent and the child after the |
| 1916 | * fork. Restart if a signal comes in before we add the new process to |
| 1917 | * its process group. |
| 1918 | * A fatal signal pending means that current will exit, so the new |
| 1919 | * thread can't slip out of an OOM kill (or normal SIGKILL). |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 1920 | */ |
Daniel Walker | 23ff444 | 2007-10-18 03:06:07 -0700 | [diff] [blame] | 1921 | recalc_sigpending(); |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1922 | if (signal_pending(current)) { |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1923 | retval = -ERESTARTNOINTR; |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 1924 | goto bad_fork_cancel_cgroup; |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1925 | } |
Gargi Sharma | e8cfbc2 | 2017-11-17 15:30:34 -0800 | [diff] [blame] | 1926 | if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { |
Kirill Tkhai | 3fd3722 | 2017-05-12 19:11:31 +0300 | [diff] [blame] | 1927 | retval = -ENOMEM; |
| 1928 | goto bad_fork_cancel_cgroup; |
| 1929 | } |
Oleg Nesterov | 4a2c7a7 | 2006-03-28 16:11:26 -0800 | [diff] [blame] | 1930 | |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1931 | if (likely(p->pid)) { |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 1932 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1934 | init_task_pid(p, PIDTYPE_PID, pid); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1935 | if (thread_group_leader(p)) { |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1936 | init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); |
| 1937 | init_task_pid(p, PIDTYPE_SID, task_session(current)); |
| 1938 | |
Eric W. Biederman | 1c4042c | 2010-07-12 17:10:36 -0700 | [diff] [blame] | 1939 | if (is_child_reaper(pid)) { |
Eric W. Biederman | 17cf22c | 2010-03-02 14:51:53 -0800 | [diff] [blame] | 1940 | ns_of_pid(pid)->child_reaper = p; |
Eric W. Biederman | 1c4042c | 2010-07-12 17:10:36 -0700 | [diff] [blame] | 1941 | p->signal->flags |= SIGNAL_UNKILLABLE; |
| 1942 | } |
Oleg Nesterov | c97d989 | 2006-03-28 16:11:06 -0800 | [diff] [blame] | 1943 | |
Oleg Nesterov | fea9d17 | 2008-02-08 04:19:19 -0800 | [diff] [blame] | 1944 | p->signal->leader_pid = pid; |
Alan Cox | 9c9f4de | 2008-10-13 10:37:26 +0100 | [diff] [blame] | 1945 | p->signal->tty = tty_kref_get(current->signal->tty); |
Pavel Tikhomirov | 749860c | 2017-01-30 18:06:12 +0300 | [diff] [blame] | 1946 | /* |
| 1947 | * Inherit the has_child_subreaper flag under the same |
| 1948 | * tasklist_lock hold that adds the child to the process tree, |
| 1949 | * for the propagate_has_child_subreaper() optimization. |
| 1950 | */ |
| 1951 | p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper || |
| 1952 | p->real_parent->signal->is_child_subreaper; |
Oleg Nesterov | 9cd80bb | 2009-12-17 15:27:15 -0800 | [diff] [blame] | 1953 | list_add_tail(&p->sibling, &p->real_parent->children); |
Eric W. Biederman | 5e85d4a | 2006-04-18 22:20:16 -0700 | [diff] [blame] | 1954 | list_add_tail_rcu(&p->tasks, &init_task.tasks); |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1955 | attach_pid(p, PIDTYPE_PGID); |
| 1956 | attach_pid(p, PIDTYPE_SID); |
Christoph Lameter | 909ea96 | 2010-12-08 16:22:55 +0100 | [diff] [blame] | 1957 | __this_cpu_inc(process_counts); |
Oleg Nesterov | 80628ca | 2013-07-03 15:08:30 -0700 | [diff] [blame] | 1958 | } else { |
| 1959 | current->signal->nr_threads++; |
| 1960 | atomic_inc(¤t->signal->live); |
| 1961 | atomic_inc(¤t->signal->sigcnt); |
Oleg Nesterov | 80628ca | 2013-07-03 15:08:30 -0700 | [diff] [blame] | 1962 | list_add_tail_rcu(&p->thread_group, |
| 1963 | &p->group_leader->thread_group); |
Oleg Nesterov | 0c740d0 | 2014-01-21 15:49:56 -0800 | [diff] [blame] | 1964 | list_add_tail_rcu(&p->thread_node, |
| 1965 | &p->signal->thread_head); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1966 | } |
Oleg Nesterov | 8190773 | 2013-07-03 15:08:31 -0700 | [diff] [blame] | 1967 | attach_pid(p, PIDTYPE_PID); |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 1968 | nr_threads++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | } |
| 1970 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1971 | total_forks++; |
Oleg Nesterov | 3f17da6 | 2006-02-15 22:13:24 +0300 | [diff] [blame] | 1972 | spin_unlock(¤t->sighand->siglock); |
Oleg Nesterov | 4af4206 | 2014-04-13 20:58:54 +0200 | [diff] [blame] | 1973 | syscall_tracepoint_update(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1974 | write_unlock_irq(&tasklist_lock); |
Oleg Nesterov | 4af4206 | 2014-04-13 20:58:54 +0200 | [diff] [blame] | 1975 | |
Andrew Morton | c13cf85 | 2005-11-28 13:43:48 -0800 | [diff] [blame] | 1976 | proc_fork_connector(p); |
Oleg Nesterov | b53202e | 2015-12-03 10:24:08 -0500 | [diff] [blame] | 1977 | cgroup_post_fork(p); |
Ingo Molnar | 780de9d | 2017-02-02 11:50:56 +0100 | [diff] [blame] | 1978 | cgroup_threadgroup_change_end(current); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1979 | perf_event_fork(p); |
KAMEZAWA Hiroyuki | 43d2b11 | 2012-01-10 15:08:09 -0800 | [diff] [blame] | 1980 | |
| 1981 | trace_task_newtask(p, clone_flags); |
Oleg Nesterov | 3ab6796 | 2013-10-16 19:39:37 +0200 | [diff] [blame] | 1982 | uprobe_copy_process(p, clone_flags); |
KAMEZAWA Hiroyuki | 43d2b11 | 2012-01-10 15:08:09 -0800 | [diff] [blame] | 1983 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1984 | return p; |
| 1985 | |
Aleksa Sarai | 7e47682 | 2015-06-09 21:32:09 +1000 | [diff] [blame] | 1986 | bad_fork_cancel_cgroup: |
Kirill Tkhai | 3fd3722 | 2017-05-12 19:11:31 +0300 | [diff] [blame] | 1987 | spin_unlock(¤t->sighand->siglock); |
| 1988 | write_unlock_irq(&tasklist_lock); |
Oleg Nesterov | b53202e | 2015-12-03 10:24:08 -0500 | [diff] [blame] | 1989 | cgroup_cancel_fork(p); |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1990 | bad_fork_free_pid: |
Ingo Molnar | 780de9d | 2017-02-02 11:50:56 +0100 | [diff] [blame] | 1991 | cgroup_threadgroup_change_end(current); |
Pavel Emelyanov | 425fb2b | 2007-10-18 23:40:07 -0700 | [diff] [blame] | 1992 | if (pid != &init_struct_pid) |
| 1993 | free_pid(pid); |
Jiri Slaby | 0740aa5 | 2016-05-20 17:00:25 -0700 | [diff] [blame] | 1994 | bad_fork_cleanup_thread: |
| 1995 | exit_thread(p); |
Jens Axboe | fd0928d | 2008-01-24 08:52:45 +0100 | [diff] [blame] | 1996 | bad_fork_cleanup_io: |
Louis Rilling | b69f229 | 2009-12-04 14:52:42 +0100 | [diff] [blame] | 1997 | if (p->io_context) |
| 1998 | exit_io_context(p); |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 1999 | bad_fork_cleanup_namespaces: |
Linus Torvalds | 444f378 | 2007-01-30 13:35:18 -0800 | [diff] [blame] | 2000 | exit_task_namespaces(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2001 | bad_fork_cleanup_mm: |
David Rientjes | c9f0124 | 2011-10-31 17:07:15 -0700 | [diff] [blame] | 2002 | if (p->mm) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2003 | mmput(p->mm); |
| 2004 | bad_fork_cleanup_signal: |
Oleg Nesterov | 4ab6c08 | 2009-08-26 14:29:24 -0700 | [diff] [blame] | 2005 | if (!(clone_flags & CLONE_THREAD)) |
Mike Galbraith | 1c5354d | 2011-01-05 11:16:04 +0100 | [diff] [blame] | 2006 | free_signal_struct(p->signal); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2007 | bad_fork_cleanup_sighand: |
Oleg Nesterov | a7e5328 | 2006-03-28 16:11:27 -0800 | [diff] [blame] | 2008 | __cleanup_sighand(p->sighand); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 | bad_fork_cleanup_fs: |
| 2010 | exit_fs(p); /* blocking */ |
| 2011 | bad_fork_cleanup_files: |
| 2012 | exit_files(p); /* blocking */ |
| 2013 | bad_fork_cleanup_semundo: |
| 2014 | exit_sem(p); |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 2015 | bad_fork_cleanup_security: |
| 2016 | security_task_free(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2017 | bad_fork_cleanup_audit: |
| 2018 | audit_free(p); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 2019 | bad_fork_cleanup_perf: |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 2020 | perf_event_free_task(p); |
Peter Zijlstra | 6c72e350 | 2014-10-02 16:17:02 -0700 | [diff] [blame] | 2021 | bad_fork_cleanup_policy: |
Byungchul Park | b09be67 | 2017-08-07 16:12:52 +0900 | [diff] [blame] | 2022 | lockdep_free_task(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | #ifdef CONFIG_NUMA |
Lee Schermerhorn | f0be3d3 | 2008-04-28 02:13:08 -0700 | [diff] [blame] | 2024 | mpol_put(p->mempolicy); |
Li Zefan | e8604cb | 2014-03-28 15:18:27 +0800 | [diff] [blame] | 2025 | bad_fork_cleanup_threadgroup_lock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2026 | #endif |
Shailabh Nagar | 35df17c | 2006-08-31 21:27:38 -0700 | [diff] [blame] | 2027 | delayacct_tsk_free(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | bad_fork_cleanup_count: |
David Howells | d84f4f9 | 2008-11-14 10:39:23 +1100 | [diff] [blame] | 2029 | atomic_dec(&p->cred->user->processes); |
David Howells | e0e8173 | 2009-09-02 09:13:40 +0100 | [diff] [blame] | 2030 | exit_creds(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2031 | bad_fork_free: |
Andy Lutomirski | 405c075 | 2016-10-31 08:11:43 -0700 | [diff] [blame] | 2032 | p->state = TASK_DEAD; |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 2033 | put_task_stack(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2034 | free_task(p); |
Oleg Nesterov | fe7d37d | 2006-01-08 01:04:02 -0800 | [diff] [blame] | 2035 | fork_out: |
| 2036 | return ERR_PTR(retval); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2037 | } |
| 2038 | |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2039 | static inline void init_idle_pids(struct pid_link *links) |
| 2040 | { |
| 2041 | enum pid_type type; |
| 2042 | |
| 2043 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { |
| 2044 | INIT_HLIST_NODE(&links[type].node); /* not really needed */ |
| 2045 | links[type].pid = &init_struct_pid; |
| 2046 | } |
| 2047 | } |
| 2048 | |
Paul Gortmaker | 0db0628 | 2013-06-19 14:53:51 -0400 | [diff] [blame] | 2049 | struct task_struct *fork_idle(int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2050 | { |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 2051 | struct task_struct *task; |
Andi Kleen | 725fc62 | 2016-05-23 16:24:05 -0700 | [diff] [blame] | 2052 | task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0, |
| 2053 | cpu_to_node(cpu)); |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2054 | if (!IS_ERR(task)) { |
| 2055 | init_idle_pids(task->pids); |
Akinobu Mita | 753ca4f | 2006-11-25 11:09:34 -0800 | [diff] [blame] | 2056 | init_idle(task, cpu); |
Oleg Nesterov | f106eee | 2010-05-26 14:44:11 -0700 | [diff] [blame] | 2057 | } |
Oleg Nesterov | 73b9ebf | 2006-03-28 16:11:07 -0800 | [diff] [blame] | 2058 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2059 | return task; |
| 2060 | } |
| 2061 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | /* |
| 2063 | * OK, this is the main fork routine. |
| 2064 | * |
| 2065 | * It copies the process, and if successful kick-starts |
| 2066 | * it and waits for it to finish using the VM if required. |
| 2067 | */ |
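/*
 * For illustration, how the wrappers further down funnel into this
 * routine: fork() is _do_fork(SIGCHLD, ...), vfork() is
 * _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...), and clone() passes
 * its flags through unchanged. A minimal userspace sketch:
 *
 *	#include <stdio.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t pid = fork();		// _do_fork(SIGCHLD, ...)
 *		if (pid == 0)
 *			_exit(0);		// child exits immediately
 *		waitpid(pid, NULL, 0);		// parent reaps, gets SIGCHLD
 *		printf("child was %d\n", (int)pid);
 *		return 0;
 *	}
 */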
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2068 | long _do_fork(unsigned long clone_flags, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2069 | unsigned long stack_start, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | unsigned long stack_size, |
| 2071 | int __user *parent_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2072 | int __user *child_tidptr, |
| 2073 | unsigned long tls) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2074 | { |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2075 | struct completion vfork; |
| 2076 | struct pid *pid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2077 | struct task_struct *p; |
| 2078 | int trace = 0; |
Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 2079 | long nr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | |
Andrew Morton | bdff746 | 2008-02-04 22:27:22 -0800 | [diff] [blame] | 2081 | /* |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2082 | * Determine whether and which event to report to the ptracer. When |
| 2083 | * called from kernel_thread or when CLONE_UNTRACED is explicitly |
| 2084 | * requested, no event is reported; otherwise, report if the event |
| 2085 | * for the type of forking is enabled. |
Roland McGrath | 09a0539 | 2008-07-25 19:45:47 -0700 | [diff] [blame] | 2086 | */ |
Al Viro | e80d666 | 2012-10-22 23:10:08 -0400 | [diff] [blame] | 2087 | if (!(clone_flags & CLONE_UNTRACED)) { |
Tejun Heo | 4b9d33e | 2011-06-17 16:50:38 +0200 | [diff] [blame] | 2088 | if (clone_flags & CLONE_VFORK) |
| 2089 | trace = PTRACE_EVENT_VFORK; |
| 2090 | else if ((clone_flags & CSIGNAL) != SIGCHLD) |
| 2091 | trace = PTRACE_EVENT_CLONE; |
| 2092 | else |
| 2093 | trace = PTRACE_EVENT_FORK; |
| 2094 | |
| 2095 | if (likely(!ptrace_event_enabled(current, trace))) |
| 2096 | trace = 0; |
| 2097 | } |
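/*
 * Tracer-side sketch: the events chosen above fire only if the tracer
 * opted in beforehand via PTRACE_SETOPTIONS (standard ptrace(2) API;
 * pid is the tracee):
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK |
 *	       PTRACE_O_TRACECLONE | PTRACE_O_TRACEVFORKDONE);
 */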
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2098 | |
Al Viro | 62e791c | 2012-10-22 22:52:26 -0400 | [diff] [blame] | 2099 | p = copy_process(clone_flags, stack_start, stack_size, |
Andi Kleen | 725fc62 | 2016-05-23 16:24:05 -0700 | [diff] [blame] | 2100 | child_tidptr, NULL, trace, tls, NUMA_NO_NODE); |
Emese Revfy | 38addce | 2016-06-20 20:41:19 +0200 | [diff] [blame] | 2101 | add_latent_entropy(); |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2102 | |
| 2103 | if (IS_ERR(p)) |
| 2104 | return PTR_ERR(p); |
| 2105 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2106 | /* |
| 2107 | * Do this prior to waking up the new thread - the thread pointer |
| 2108 | * might become invalid after that point, if the thread exits quickly. |
| 2109 | */ |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2110 | trace_sched_process_fork(current, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2111 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2112 | pid = get_task_pid(p, PIDTYPE_PID); |
| 2113 | nr = pid_vnr(pid); |
Mathieu Desnoyers | 0a16b60 | 2008-07-18 12:16:17 -0400 | [diff] [blame] | 2114 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2115 | if (clone_flags & CLONE_PARENT_SETTID) |
| 2116 | put_user(nr, parent_tidptr); |
Pavel Emelyanov | 30e49c2 | 2007-10-18 23:40:10 -0700 | [diff] [blame] | 2117 | |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2118 | if (clone_flags & CLONE_VFORK) { |
| 2119 | p->vfork_done = &vfork; |
| 2120 | init_completion(&vfork); |
| 2121 | get_task_struct(p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2122 | } |
Marcos Paulo de Souza | 9f5325a | 2018-02-06 15:39:30 -0800 | [diff] [blame] | 2123 | |
| 2124 | wake_up_new_task(p); |
| 2125 | |
| 2126 | /* forking complete and child started to run, tell ptracer */ |
| 2127 | if (unlikely(trace)) |
| 2128 | ptrace_event_pid(trace, pid); |
| 2129 | |
| 2130 | if (clone_flags & CLONE_VFORK) { |
| 2131 | if (!wait_for_vfork_done(p, &vfork)) |
| 2132 | ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); |
| 2133 | } |
| 2134 | |
| 2135 | put_pid(pid); |
Eric W. Biederman | 92476d7 | 2006-03-31 02:31:42 -0800 | [diff] [blame] | 2136 | return nr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2137 | } |
| 2138 | |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2139 | #ifndef CONFIG_HAVE_COPY_THREAD_TLS |
| 2140 | /* For compatibility with architectures that call do_fork directly rather than |
| 2141 | * using the syscall entry points below. */ |
| 2142 | long do_fork(unsigned long clone_flags, |
| 2143 | unsigned long stack_start, |
| 2144 | unsigned long stack_size, |
| 2145 | int __user *parent_tidptr, |
| 2146 | int __user *child_tidptr) |
| 2147 | { |
| 2148 | return _do_fork(clone_flags, stack_start, stack_size, |
| 2149 | parent_tidptr, child_tidptr, 0); |
| 2150 | } |
| 2151 | #endif |
| 2152 | |
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2153 | /* |
| 2154 | * Create a kernel thread. |
| 2155 | */ |
| 2156 | pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) |
| 2157 | { |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2158 | return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, |
| 2159 | (unsigned long)arg, NULL, NULL, 0); |
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2160 | } |
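/*
 * Usage sketch (my_worker is hypothetical): the new task runs fn(arg)
 * entirely in kernel space and never returns to userspace; most code
 * should prefer the higher-level kthread_run() interface.
 *
 *	static int my_worker(void *arg)
 *	{
 *		pr_info("worker started\n");
 *		return 0;			// becomes the exit code
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */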
Al Viro | 2aa3a7f | 2012-09-21 19:55:31 -0400 | [diff] [blame] | 2161 | |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2162 | #ifdef __ARCH_WANT_SYS_FORK |
| 2163 | SYSCALL_DEFINE0(fork) |
| 2164 | { |
| 2165 | #ifdef CONFIG_MMU |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2166 | return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2167 | #else |
| 2168 | /* cannot be supported in nommu mode */ |
Daeseok Youn | 5d59e18 | 2014-01-23 15:55:47 -0800 | [diff] [blame] | 2169 | return -EINVAL; |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2170 | #endif |
| 2171 | } |
| 2172 | #endif |
| 2173 | |
| 2174 | #ifdef __ARCH_WANT_SYS_VFORK |
| 2175 | SYSCALL_DEFINE0(vfork) |
| 2176 | { |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2177 | return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, |
| 2178 | 0, NULL, NULL, 0); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2179 | } |
| 2180 | #endif |
| 2181 | |
| 2182 | #ifdef __ARCH_WANT_SYS_CLONE |
| 2183 | #ifdef CONFIG_CLONE_BACKWARDS |
| 2184 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2185 | int __user *, parent_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2186 | unsigned long, tls, |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2187 | int __user *, child_tidptr) |
| 2188 | #elif defined(CONFIG_CLONE_BACKWARDS2) |
| 2189 | SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, |
| 2190 | int __user *, parent_tidptr, |
| 2191 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2192 | unsigned long, tls) |
Michal Simek | dfa9771 | 2013-08-13 16:00:53 -0700 | [diff] [blame] | 2193 | #elif defined(CONFIG_CLONE_BACKWARDS3) |
| 2194 | SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2195 | int, stack_size, |
| 2196 | int __user *, parent_tidptr, |
| 2197 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2198 | unsigned long, tls) |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2199 | #else |
| 2200 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
| 2201 | int __user *, parent_tidptr, |
| 2202 | int __user *, child_tidptr, |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2203 | unsigned long, tls) |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2204 | #endif |
| 2205 | { |
Josh Triplett | 3033f14a | 2015-06-25 15:01:19 -0700 | [diff] [blame] | 2206 | return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls); |
Al Viro | d212504 | 2012-10-23 13:17:59 -0400 | [diff] [blame] | 2207 | } |
| 2208 | #endif |
| 2209 | |
Oleg Nesterov | 0f1b92c | 2017-01-30 18:06:11 +0300 | [diff] [blame] | 2210 | void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) |
| 2211 | { |
| 2212 | struct task_struct *leader, *parent, *child; |
| 2213 | int res; |
| 2214 | |
| 2215 | read_lock(&tasklist_lock); |
| 2216 | leader = top = top->group_leader; |
| 2217 | down: |
| 2218 | for_each_thread(leader, parent) { |
| 2219 | list_for_each_entry(child, &parent->children, sibling) { |
| 2220 | res = visitor(child, data); |
| 2221 | if (res) { |
| 2222 | if (res < 0) |
| 2223 | goto out; |
| 2224 | leader = child; |
| 2225 | goto down; |
| 2226 | } |
| 2227 | up: |
| 2228 | ; |
| 2229 | } |
| 2230 | } |
| 2231 | |
| 2232 | if (leader != top) { |
| 2233 | child = leader; |
| 2234 | parent = child->real_parent; |
| 2235 | leader = parent->group_leader; |
| 2236 | goto up; |
| 2237 | } |
| 2238 | out: |
| 2239 | read_unlock(&tasklist_lock); |
| 2240 | } |
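/*
 * Usage sketch (count_visitor is hypothetical). Per the loop above, a
 * negative return from the visitor aborts the walk, a positive return
 * descends into that child's subtree, and zero moves on to the next
 * sibling:
 *
 *	static int count_visitor(struct task_struct *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;		// keep descending into children
 *	}
 *
 *	int count = 0;
 *	walk_process_tree(current, count_visitor, &count);
 */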
| 2241 | |
Ravikiran G Thirumalai | 5fd63b3 | 2006-01-11 22:46:15 +0100 | [diff] [blame] | 2242 | #ifndef ARCH_MIN_MMSTRUCT_ALIGN |
| 2243 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 |
| 2244 | #endif |
| 2245 | |
Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 2246 | static void sighand_ctor(void *data) |
Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 2247 | { |
| 2248 | struct sighand_struct *sighand = data; |
| 2249 | |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 2250 | spin_lock_init(&sighand->siglock); |
Davide Libenzi | b8fceee | 2007-09-20 12:40:16 -0700 | [diff] [blame] | 2251 | init_waitqueue_head(&sighand->signalfd_wqh); |
Oleg Nesterov | aa1757f | 2006-03-28 16:11:12 -0800 | [diff] [blame] | 2252 | } |
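/*
 * Note: sighand_cachep is created below with SLAB_TYPESAFE_BY_RCU, so
 * this constructor runs when a slab page is allocated, not on every
 * object allocation; a freed sighand_struct may be reused while RCU
 * readers still hold a reference, so the lock and waitqueue must remain
 * valid across such reuse.
 */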
| 2253 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | void __init proc_caches_init(void) |
| 2255 | { |
| 2256 | sighand_cachep = kmem_cache_create("sighand_cache", |
| 2257 | sizeof(struct sighand_struct), 0, |
Paul E. McKenney | 5f0d5a3 | 2017-01-18 02:53:44 -0800 | [diff] [blame] | 2258 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2259 | SLAB_ACCOUNT, sighand_ctor); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2260 | signal_cachep = kmem_cache_create("signal_cache", |
| 2261 | sizeof(struct signal_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2262 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2263 | NULL); |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2264 | files_cachep = kmem_cache_create("files_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2265 | sizeof(struct files_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2266 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2267 | NULL); |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 2268 | fs_cachep = kmem_cache_create("fs_cache", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2269 | sizeof(struct fs_struct), 0, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2270 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2271 | NULL); |
Linus Torvalds | 6345d24 | 2011-05-29 11:32:28 -0700 | [diff] [blame] | 2272 | /* |
| 2273 | * FIXME! The "sizeof(struct mm_struct)" currently includes the |
| 2274 | * whole struct cpumask for the OFFSTACK case. We could change |
| 2275 | * this to *only* allocate as much of it as required by the |
| 2276 | * maximum number of CPUs we can ever have. The cpumask_allocation |
| 2277 | * is at the end of the structure, exactly for that reason. |
| 2278 | */ |
David Windsor | 07dcd7f | 2017-08-15 16:45:00 -0700 | [diff] [blame] | 2279 | mm_cachep = kmem_cache_create_usercopy("mm_struct", |
Ravikiran G Thirumalai | 5fd63b3 | 2006-01-11 22:46:15 +0100 | [diff] [blame] | 2280 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, |
Levin, Alexander (Sasha Levin) | 75f296d | 2017-11-15 17:35:54 -0800 | [diff] [blame] | 2281 | SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, |
David Windsor | 07dcd7f | 2017-08-15 16:45:00 -0700 | [diff] [blame] | 2282 | offsetof(struct mm_struct, saved_auxv), |
| 2283 | sizeof_field(struct mm_struct, saved_auxv), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 2284 | NULL); |
| 2285 | vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 2286 | mmap_init(); |
Al Viro | 6657719 | 2011-06-28 15:41:10 -0400 | [diff] [blame] | 2287 | nsproxy_cache_init(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2288 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2289 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2290 | /* |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2291 | * Check constraints on flags passed to the unshare system call. |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2292 | */ |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2293 | static int check_unshare_flags(unsigned long unshare_flags) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2294 | { |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2295 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| |
| 2296 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 2297 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET| |
Aditya Kali | a79a908 | 2016-01-29 02:54:06 -0600 | [diff] [blame] | 2298 | CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP)) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2299 | return -EINVAL; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2300 | /* |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2301 | * Not implemented, but pretend it works if there is nothing |
| 2302 | * to unshare. Note that unsharing the address space or the |
| 2303 | * signal handlers also requires unsharing the signal queues |
| 2304 | * (aka CLONE_THREAD). |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2305 | */ |
| 2306 | if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2307 | if (!thread_group_empty(current)) |
| 2308 | return -EINVAL; |
| 2309 | } |
| 2310 | if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { |
| 2311 | if (atomic_read(¤t->sighand->count) > 1) |
| 2312 | return -EINVAL; |
| 2313 | } |
| 2314 | if (unshare_flags & CLONE_VM) { |
| 2315 | if (!current_is_single_threaded()) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2316 | return -EINVAL; |
| 2317 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2318 | |
| 2319 | return 0; |
| 2320 | } |
| 2321 | |
| 2322 | /* |
JANAK DESAI | 99d1419 | 2006-02-07 12:58:59 -0800 | [diff] [blame] | 2323 | * Unshare the filesystem structure if it is being shared |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2324 | */ |
| 2325 | static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp) |
| 2326 | { |
| 2327 | struct fs_struct *fs = current->fs; |
| 2328 | |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 2329 | if (!(unshare_flags & CLONE_FS) || !fs) |
| 2330 | return 0; |
| 2331 | |
| 2332 | /* no lock needed here; in the worst case we do a useless copy */ |
| 2333 | if (fs->users == 1) |
| 2334 | return 0; |
| 2335 | |
| 2336 | *new_fsp = copy_fs_struct(fs); |
| 2337 | if (!*new_fsp) |
| 2338 | return -ENOMEM; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2339 | |
| 2340 | return 0; |
| 2341 | } |
| 2342 | |
| 2343 | /* |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2344 | * Unshare file descriptor table if it is being shared |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2345 | */ |
| 2346 | static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp) |
| 2347 | { |
| 2348 | struct files_struct *fd = current->files; |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2349 | int error = 0; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2350 | |
| 2351 | if ((unshare_flags & CLONE_FILES) && |
JANAK DESAI | a016f33 | 2006-02-07 12:59:02 -0800 | [diff] [blame] | 2352 | (fd && atomic_read(&fd->count) > 1)) { |
| 2353 | *new_fdp = dup_fd(fd, &error); |
| 2354 | if (!*new_fdp) |
| 2355 | return error; |
| 2356 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2357 | |
| 2358 | return 0; |
| 2359 | } |
| 2360 | |
| 2361 | /* |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2362 | * unshare allows a process to 'unshare' part of the process |
| 2363 | * context which was originally shared using clone. copy_* |
| 2364 | * functions used by do_fork() cannot be used here directly |
| 2365 | * because they modify an inactive task_struct that is being |
| 2366 | * constructed. Here we are modifying the current, active |
| 2367 | * task_struct. |
| 2368 | */ |
Dominik Brodowski | 9b32105 | 2018-03-11 11:34:42 +0100 | [diff] [blame] | 2369 | int ksys_unshare(unsigned long unshare_flags) |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2370 | { |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2371 | struct fs_struct *fs, *new_fs = NULL; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2372 | struct files_struct *fd, *new_fd = NULL; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2373 | struct cred *new_cred = NULL; |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 2374 | struct nsproxy *new_nsproxy = NULL; |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 2375 | int do_sysvsem = 0; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2376 | int err; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2377 | |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 2378 | /* |
Eric W. Biederman | faf00da | 2015-08-10 18:25:44 -0500 | [diff] [blame] | 2379 | * If unsharing a user namespace, we must also unshare the thread group |
| 2380 | * and the filesystem root and working directories. |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2381 | */ |
| 2382 | if (unshare_flags & CLONE_NEWUSER) |
Eric W. Biederman | e66eded | 2013-03-13 11:51:49 -0700 | [diff] [blame] | 2383 | unshare_flags |= CLONE_THREAD | CLONE_FS; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2384 | /* |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 2385 | * If unsharing the VM, we must also unshare signal handlers. |
| 2386 | */ |
| 2387 | if (unshare_flags & CLONE_VM) |
| 2388 | unshare_flags |= CLONE_SIGHAND; |
Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 2389 | /* |
Eric W. Biederman | 12c641a | 2015-08-10 17:35:07 -0500 | [diff] [blame] | 2390 | * If unsharing signal handlers, we must also unshare the signal queues. |
| 2391 | */ |
| 2392 | if (unshare_flags & CLONE_SIGHAND) |
| 2393 | unshare_flags |= CLONE_THREAD; |
| 2394 | /* |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2395 | * If unsharing the mount namespace, we must also unshare filesystem information. |
| 2396 | */ |
| 2397 | if (unshare_flags & CLONE_NEWNS) |
| 2398 | unshare_flags |= CLONE_FS; |
Eric W. Biederman | 50804fe | 2010-03-02 15:41:50 -0800 | [diff] [blame] | 2399 | |
| 2400 | err = check_unshare_flags(unshare_flags); |
| 2401 | if (err) |
| 2402 | goto bad_unshare_out; |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2403 | /* |
Manfred Spraul | 6013f67 | 2008-04-29 01:00:59 -0700 | [diff] [blame] | 2404 | * CLONE_NEWIPC must also detach from the undolist: after switching |
| 2405 | * to a new ipc namespace, the semaphore arrays from the old |
| 2406 | * namespace are unreachable. |
| 2407 | */ |
| 2408 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 2409 | do_sysvsem = 1; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2410 | err = unshare_fs(unshare_flags, &new_fs); |
| 2411 | if (err) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2412 | goto bad_unshare_out; |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2413 | err = unshare_fd(unshare_flags, &new_fd); |
| 2414 | if (err) |
Oleg Nesterov | 9bfb23f | 2011-03-22 16:34:09 -0700 | [diff] [blame] | 2415 | goto bad_unshare_cleanup_fs; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2416 | err = unshare_userns(unshare_flags, &new_cred); |
Daniel Rebelo de Oliveira | fb0a685 | 2011-07-26 16:08:39 -0700 | [diff] [blame] | 2417 | if (err) |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 2418 | goto bad_unshare_cleanup_fd; |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2419 | err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, |
| 2420 | new_cred, new_fs); |
| 2421 | if (err) |
| 2422 | goto bad_unshare_cleanup_cred; |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2423 | |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2424 | if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) { |
Manfred Spraul | 9edff4a | 2008-04-29 01:00:57 -0700 | [diff] [blame] | 2425 | if (do_sysvsem) { |
| 2426 | /* |
| 2427 | * For the semaphore undo lists, CLONE_SYSVSEM is equivalent to sys_exit(). |
| 2428 | */ |
| 2429 | exit_sem(current); |
| 2430 | } |
Jack Miller | ab602f7 | 2014-08-08 14:23:19 -0700 | [diff] [blame] | 2431 | if (unshare_flags & CLONE_NEWIPC) { |
| 2432 | /* Orphan segments in old ns (see sem above). */ |
| 2433 | exit_shm(current); |
| 2434 | shm_init_task(current); |
| 2435 | } |
Serge E. Hallyn | ab51601 | 2006-10-02 02:18:06 -0700 | [diff] [blame] | 2436 | |
Alan Cox | 6f977e6 | 2013-02-27 17:03:23 -0800 | [diff] [blame] | 2437 | if (new_nsproxy) |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 2438 | switch_task_namespaces(current, new_nsproxy); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2439 | |
Pavel Emelyanov | cf7b708 | 2007-10-18 23:39:54 -0700 | [diff] [blame] | 2440 | task_lock(current); |
| 2441 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2442 | if (new_fs) { |
| 2443 | fs = current->fs; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 2444 | spin_lock(&fs->lock); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2445 | current->fs = new_fs; |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 2446 | if (--fs->users) |
| 2447 | new_fs = NULL; |
| 2448 | else |
| 2449 | new_fs = fs; |
Nick Piggin | 2a4419b | 2010-08-18 04:37:33 +1000 | [diff] [blame] | 2450 | spin_unlock(&fs->lock); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2451 | } |
| 2452 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2453 | if (new_fd) { |
| 2454 | fd = current->files; |
| 2455 | current->files = new_fd; |
| 2456 | new_fd = fd; |
| 2457 | } |
| 2458 | |
| 2459 | task_unlock(current); |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2460 | |
| 2461 | if (new_cred) { |
| 2462 | /* Install the new user namespace */ |
| 2463 | commit_creds(new_cred); |
| 2464 | new_cred = NULL; |
| 2465 | } |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2466 | } |
| 2467 | |
Hari Bathini | e422267 | 2017-03-08 02:11:36 +0530 | [diff] [blame] | 2468 | perf_event_namespaces(current); |
| 2469 | |
Eric W. Biederman | b2e0d987 | 2012-07-26 05:15:35 -0700 | [diff] [blame] | 2470 | bad_unshare_cleanup_cred: |
| 2471 | if (new_cred) |
| 2472 | put_cred(new_cred); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2473 | bad_unshare_cleanup_fd: |
| 2474 | if (new_fd) |
| 2475 | put_files_struct(new_fd); |
| 2476 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2477 | bad_unshare_cleanup_fs: |
| 2478 | if (new_fs) |
Al Viro | 498052b | 2009-03-30 07:20:30 -0400 | [diff] [blame] | 2479 | free_fs_struct(new_fs); |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2480 | |
JANAK DESAI | cf2e340 | 2006-02-07 12:58:58 -0800 | [diff] [blame] | 2481 | bad_unshare_out: |
| 2482 | return err; |
| 2483 | } |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 2484 | |
Dominik Brodowski | 9b32105 | 2018-03-11 11:34:42 +0100 | [diff] [blame] | 2485 | SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
| 2486 | { |
| 2487 | return ksys_unshare(unshare_flags); |
| 2488 | } |
| 2489 | |
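/*
 * For illustration, a minimal userspace sketch of the flag fix-ups in
 * ksys_unshare() above (assumes a kernel with user namespaces enabled;
 * see unshare(2)):
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// CLONE_NEWUSER implies CLONE_THREAD | CLONE_FS here, so
 *		// this fails with EINVAL in a multi-threaded process.
 *		if (unshare(CLONE_NEWUSER) != 0)
 *			perror("unshare(CLONE_NEWUSER)");
 *		return 0;
 *	}
 */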
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 2490 | /* |
| 2491 | * Helper to unshare the files of the current task. |
| 2492 | * We don't want to expose copy_files internals to |
| 2493 | * the exec layer of the kernel. |
| 2494 | */ |
| 2495 | |
| 2496 | int unshare_files(struct files_struct **displaced) |
| 2497 | { |
| 2498 | struct task_struct *task = current; |
Al Viro | 5070451 | 2008-04-26 05:25:00 +0100 | [diff] [blame] | 2499 | struct files_struct *copy = NULL; |
Al Viro | 3b12538 | 2008-04-22 05:31:30 -0400 | [diff] [blame] | 2500 | int error; |
| 2501 | |
| 2502 | error = unshare_fd(CLONE_FILES, ©); |
| 2503 | if (error || !copy) { |
| 2504 | *displaced = NULL; |
| 2505 | return error; |
| 2506 | } |
| 2507 | *displaced = task->files; |
| 2508 | task_lock(task); |
| 2509 | task->files = copy; |
| 2510 | task_unlock(task); |
| 2511 | return 0; |
| 2512 | } |
Heinrich Schuchardt | 16db3d3 | 2015-04-16 12:47:50 -0700 | [diff] [blame] | 2513 | |
| 2514 | int sysctl_max_threads(struct ctl_table *table, int write, |
| 2515 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 2516 | { |
| 2517 | struct ctl_table t; |
| 2518 | int ret; |
| 2519 | int threads = max_threads; |
| 2520 | int min = MIN_THREADS; |
| 2521 | int max = MAX_THREADS; |
| 2522 | |
| 2523 | t = *table; |
| 2524 | t.data = &threads; |
| 2525 | t.extra1 = &min; |
| 2526 | t.extra2 = &max; |
| 2527 | |
| 2528 | ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 2529 | if (ret || !write) |
| 2530 | return ret; |
| 2531 | |
| 2532 | set_max_threads(threads); |
| 2533 | |
| 2534 | return 0; |
| 2535 | } |
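/*
 * This handler backs the kernel.threads-max sysctl; writes outside
 * [MIN_THREADS, MAX_THREADS] are rejected by proc_dointvec_minmax()
 * with -EINVAL. A userspace sketch (needs root):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/kernel/threads-max", "w");
 *		if (!f)
 *			return 1;
 *		fprintf(f, "%d\n", 10000);
 *		return fclose(f) != 0;
 *	}
 */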