// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>

#include "util.h"

/*
 * The work queue is used to avoid the cost of synchronize_rcu in kern_unmount.
 */
static void free_ipc(struct work_struct *unused);
static DECLARE_WORK(free_ipc_work, free_ipc);

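/*
 * Charge/uncharge an ipc namespace against the owning user namespace's
 * UCOUNT_IPC_NAMESPACES limit (user.max_ipc_namespaces sysctl).
 * inc_ipc_namespaces() returns NULL when the limit would be exceeded.
 */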
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

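/*
 * Allocate and initialize a new ipc namespace: charge the ucount, assign a
 * namespace inode number, then set up mqueue, the mq/ipc sysctl tables and
 * the sem/msg/shm state.  On failure, everything set up so far is undone
 * and an ERR_PTR() is returned.
 */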
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
again:
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts) {
		/*
		 * IPC namespaces are freed asynchronously, by free_ipc_work.
		 * If frees were pending, flush_work will wait, and
		 * return true. Fail the allocation if no frees are pending.
		 */
		if (flush_work(&free_ipc_work))
			goto again;
		goto fail;
	}

	err = -ENOMEM;
	ns = kzalloc(sizeof(struct ipc_namespace), GFP_KERNEL_ACCOUNT);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->ns.count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = mq_init_ns(ns);
	if (err)
		goto fail_put;

	err = -ENOMEM;
	if (!setup_mq_sysctls(ns))
		goto fail_put;

	if (!setup_ipc_sysctls(ns))
		goto fail_mq;

	err = msg_init_ns(ns);
	if (err)
		goto fail_put;

	sem_init_ns(ns);
	shm_init_ns(ns);

	return ns;

fail_mq:
	retire_mq_sysctls(ns);

fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

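/*
 * Called from copy_namespaces()/unshare: without CLONE_NEWIPC the child
 * just takes a reference on the current ipc namespace, otherwise a fresh
 * one is created in @user_ns.
 */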
struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rwsem);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		rcu_read_lock();
		ipc_lock_object(perm);
		free(ns, perm);
		total++;
	}
	up_write(&ids->rwsem);
}

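/*
 * Final teardown of one ipc namespace.  Runs from the free_ipc work item
 * once no task can reach the namespace any more.
 */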
static void free_ipc_ns(struct ipc_namespace *ns)
{
	/*
	 * Caller needs to wait for an RCU grace period to have passed
	 * after making the mount point inaccessible to new accesses.
	 */
	mntput(ns->mq_mnt);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);

	retire_mq_sysctls(ns);
	retire_ipc_sysctls(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
	kfree(ns);
}

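/*
 * Dead ipc namespaces are queued on free_ipc_list and torn down in a batch
 * by the free_ipc work item, so that a single synchronize_rcu() covers all
 * of them instead of one grace-period wait per namespace.
 */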
static LLIST_HEAD(free_ipc_list);
static void free_ipc(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&free_ipc_list);
	struct ipc_namespace *n, *t;

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		mnt_make_shortterm(n->mq_mnt);

	/* Wait for any last users to have gone away. */
	synchronize_rcu();

	llist_for_each_entry_safe(n, t, node, mnt_llist)
		free_ipc_ns(n);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files. That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (refcount_dec_and_lock(&ns->ns.count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);

		if (llist_add(&ns->mnt_llist, &free_ipc_list))
			schedule_work(&free_ipc_work);
	}
}

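/*
 * The hooks below implement /proc/<pid>/ns/ipc and setns(2) for ipc
 * namespaces via the generic ns_common/proc_ns_operations machinery.
 */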
static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
	return container_of(ns, struct ipc_namespace, ns);
}

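/*
 * Grab a reference to @task's ipc namespace.  task_lock() is needed because
 * the task's nsproxy may be switched or cleared (on exit) concurrently.
 */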
static struct ns_common *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	task_unlock(task);

	return ns ? &ns->ns : NULL;
}

static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}

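/*
 * setns(2) into an ipc namespace: CAP_SYS_ADMIN is required both over the
 * target namespace's owning user namespace and in the user namespace of the
 * credentials being installed (nsset->cred), otherwise -EPERM is returned.
 */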
static int ipcns_install(struct nsset *nsset, struct ns_common *new)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct ipc_namespace *ns = to_ipc_ns(new);
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
	return to_ipc_ns(ns)->user_ns;
}

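/*
 * Userspace reaches these operations through the /proc/<pid>/ns/ipc file,
 * e.g. fd = open("/proc/self/ns/ipc", O_RDONLY); setns(fd, CLONE_NEWIPC);
 */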
const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.type		= CLONE_NEWIPC,
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
	.owner		= ipcns_owner,
};