/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

struct mqueue_fs_context {
	struct ipc_namespace	*ipc_ns;
	bool			 newns;	/* Set if newly created ipc namespace */
};

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

/*
 * Locking:
 *
 * Accesses to a message queue are synchronized by acquiring info->lock.
 *
 * There are two notable exceptions:
 * - The actual wakeup of a sleeping task is performed using the wake_q
 *   framework. info->lock is already released when wake_up_q is called.
 * - The exit codepaths after sleeping check ext_wait_queue->state without
 *   any locks. If it is STATE_READY, then the syscall is completed without
 *   acquiring info->lock.
 *
 * MQ_BARRIER:
 * To achieve proper release/acquire memory barrier pairing, the state is set to
 * STATE_READY with smp_store_release(), and it is read with READ_ONCE followed
 * by smp_acquire__after_ctrl_dep(). In addition, wake_q_add_safe() is used.
 *
 * This prevents the following races:
 *
 * 1) With the simple wake_q_add(), the task could be gone already before
 *    the increase of the reference happens
 * Thread A
 *				Thread B
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				wake_q_add(A)
 *				if (cmpxchg()) // success
 *				   ->state = STATE_READY (reordered)
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * sysret to user space
 * sys_exit()
 *				get_task_struct() // UaF
 *
 * Solution: Use wake_q_add_safe() and perform the get_task_struct() before
 * the smp_store_release() that does ->state = STATE_READY.
 *
 * 2) Without proper _release/_acquire barriers, the woken up task
 *    could read stale data
 *
 * Thread A
 *				Thread B
 * do_mq_timedreceive
 * WRITE_ONCE(wait.state, STATE_NONE);
 * schedule_hrtimeout()
 *				state = STATE_READY;
 * <timeout returns>
 * if (wait.state == STATE_READY) return;
 * msg_ptr = wait.msg;		// Access to stale data!
 *				receiver->msg = message; (reordered)
 *
 * Solution: use _release and _acquire barriers.
 *
 * 3) There is intentionally no barrier when setting current->state
 *    to TASK_INTERRUPTIBLE: spin_unlock(&info->lock) provides the
 *    release memory barrier, and the wakeup is triggered when holding
 *    info->lock, i.e. spin_lock(&info->lock) provided a pairing
 *    acquire memory barrier.
 */

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct rb_node *msg_tree_rightmost;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	u32 notify_self_exec_id;
	struct user_namespace *notify_user_ns;
	struct ucounts *ucounts;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct file_system_type mqueue_fs_type;
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static const struct fs_context_operations mqueue_fs_context_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	bool rightmost = true;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority) {
			p = &(*p)->rb_left;
			rightmost = false;
		} else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;

	if (rightmost)
		info->msg_tree_rightmost = &leaf->rb_node;

	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline void msg_tree_erase(struct posix_msg_tree_node *leaf,
				  struct mqueue_inode_info *info)
{
	struct rb_node *node = &leaf->rb_node;

	if (info->msg_tree_rightmost == node)
		info->msg_tree_rightmost = rb_prev(node);

	rb_erase(node, &info->msg_tree);
	if (info->node_cache)
		kfree(leaf);
	else
		info->node_cache = leaf;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	/*
	 * During insert, low priorities go to the left and high to the
	 * right.  On receive, we want the highest priorities first, so
	 * walk all the way to the right.
	 */
	parent = info->msg_tree_rightmost;
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		msg_tree_erase(leaf, info);
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			msg_tree_erase(leaf, info);
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->ucounts = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->msg_tree_rightmost = NULL;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */

		ret = -EINVAL;
		if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
			goto out_inode;
		if (capable(CAP_SYS_RESOURCE)) {
			if (info->attr.mq_maxmsg > HARD_MSGMAX ||
			    info->attr.mq_msgsize > HARD_MSGSIZEMAX)
				goto out_inode;
		} else {
			if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
			    info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
				goto out_inode;
		}
		ret = -EOVERFLOW;
		/* check for overflow */
		if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
			goto out_inode;
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);
		mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
		if (mq_bytes + mq_treesize < mq_bytes)
			goto out_inode;
		mq_bytes += mq_treesize;
		info->ucounts = get_ucounts(current_ucounts());
		if (info->ucounts) {
			long msgqueue;

			spin_lock(&mq_lock);
			msgqueue = inc_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
			if (msgqueue == LONG_MAX || msgqueue > rlimit(RLIMIT_MSGQUEUE)) {
				dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
				spin_unlock(&mq_lock);
				put_ucounts(info->ucounts);
				info->ucounts = NULL;
				/* mqueue_evict_inode() releases info->messages */
				ret = -EMFILE;
				goto out_inode;
			}
			spin_unlock(&mq_lock);
		}
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static int mqueue_get_tree(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	/*
	 * With a newly created ipc namespace, we don't need to do a search
	 * for an ipc namespace match, but we still need to set s_fs_info.
	 */
	if (ctx->newns) {
		fc->s_fs_info = ctx->ipc_ns;
		return get_tree_nodev(fc, mqueue_fill_super);
	}
	return get_tree_keyed(fc, mqueue_fill_super, ctx->ipc_ns);
}

static void mqueue_fs_context_free(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx = fc->fs_private;

	put_ipc_ns(ctx->ipc_ns);
	kfree(ctx);
}

static int mqueue_init_fs_context(struct fs_context *fc)
{
	struct mqueue_fs_context *ctx;

	ctx = kzalloc(sizeof(struct mqueue_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);
	fc->fs_private = ctx;
	fc->ops = &mqueue_fs_context_ops;
	return 0;
}

/*
 * mq_init_ns() is currently the only caller of mq_create_mount().
 * So the ns parameter is always a newly created ipc namespace.
 */
static struct vfsmount *mq_create_mount(struct ipc_namespace *ns)
{
	struct mqueue_fs_context *ctx;
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&mqueue_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	ctx = fc->fs_private;
	ctx->newns = true;
	put_ipc_ns(ctx->ipc_ns);
	ctx->ipc_ns = get_ipc_ns(ns);
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(ctx->ipc_ns->user_ns);

	mnt = fc_mount(fc);
	put_fs_context(fc);
	return mnt;
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = alloc_inode_sb(sb, mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_free_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg, *nmsg;
	LIST_HEAD(tmp_msg);

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		list_add_tail(&msg->m_list, &tmp_msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	list_for_each_entry_safe(msg, nmsg, &tmp_msg, m_list) {
		list_del(&msg->m_list);
		free_msg(msg);
	}

	if (info->ucounts) {
		unsigned long mq_bytes, mq_treesize;

		/* Total amount of bytes accounted for the mqueue */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);

		spin_lock(&mq_lock);
		dec_rlimit_ucounts(info->ucounts, UCOUNT_RLIMIT_MSGQUEUE, mq_bytes);
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		put_ucounts(info->ucounts);
		info->ucounts = NULL;
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode;
	struct mq_attr *attr = arg;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_create(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, umode_t mode, bool excl)
{
	return mqueue_create_attr(dentry, mode, NULL);
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles reads from a queue file (i.e. read(2) on the
 * queue's file descriptor). To avoid turning it into a form of
 * mq_receive, only the queue size and notification info are exposed:
 * the only values that are interesting from the user's point of view
 * and that aren't accessible through the standard routines.
 */
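/*
 * Illustrative record produced by the snprintf() format below (the
 * values are examples only):
 *
 *   QSIZE:129     NOTIFY:2     SIGNO:26    NOTIFY_PID:8260
 */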
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
		 "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
		 info->qsize,
		 info->notify_owner ? info->notify.sigev_notify : 0,
		 (info->notify_owner &&
		  info->notify.sigev_notify == SIGEV_SIGNAL) ?
			info->notify.sigev_signo : 0,
		 pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				      strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	__poll_t retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = EPOLLIN | EPOLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
		   struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		/* memory barrier not required, we hold info->lock */
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		if (READ_ONCE(ewp->state) == STATE_READY) {
			/* see MQ_BARRIER for purpose/pairing */
			smp_acquire__after_ctrl_dep();
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);

		/* we hold info->lock, so no memory barrier required */
		if (READ_ONCE(ewp->state) == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when there is a registered process,
	 * no process is waiting synchronously for a message, AND the
	 * state of the queue changed from empty to not empty. Here we
	 * are sure that no one is waiting synchronously.
	 */
789 if (info->notify_owner &&
790 info->attr.mq_curmsgs == 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 switch (info->notify.sigev_notify) {
792 case SIGEV_NONE:
793 break;
Oleg Nesterovb5f20062020-05-07 18:35:39 -0700794 case SIGEV_SIGNAL: {
795 struct kernel_siginfo sig_i;
796 struct task_struct *task;
797
798 /* do_mq_notify() accepts sigev_signo == 0, why?? */
799 if (!info->notify.sigev_signo)
800 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -0600802 clear_siginfo(&sig_i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803 sig_i.si_signo = info->notify.sigev_signo;
804 sig_i.si_errno = 0;
805 sig_i.si_code = SI_MESGQ;
806 sig_i.si_value = info->notify.sigev_value;
Serge E. Hallyn6b550f92012-01-10 15:11:37 -0800807 rcu_read_lock();
Oleg Nesterovb5f20062020-05-07 18:35:39 -0700808 /* map current pid/uid into info->owner's namespaces */
Sukadev Bhattiprolua6684992009-01-07 18:08:50 -0800809 sig_i.si_pid = task_tgid_nr_ns(current,
810 ns_of_pid(info->notify_owner));
Oleg Nesterovb5f20062020-05-07 18:35:39 -0700811 sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
812 current_uid());
813 /*
814 * We can't use kill_pid_info(), this signal should
815 * bypass check_kill_permission(). It is from kernel
816 * but si_fromuser() can't know this.
817 * We do check the self_exec_id, to avoid sending
818 * signals to programs that don't expect them.
819 */
820 task = pid_task(info->notify_owner, PIDTYPE_TGID);
821 if (task && task->self_exec_id ==
822 info->notify_self_exec_id) {
823 do_send_sig_info(info->notify.sigev_signo,
824 &sig_i, task, PIDTYPE_TGID);
825 }
Serge E. Hallyn6b550f92012-01-10 15:11:37 -0800826 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 break;
Oleg Nesterovb5f20062020-05-07 18:35:39 -0700828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 case SIGEV_THREAD:
830 set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
Denis V. Lunev7ee015e2007-10-10 21:14:03 -0700831 netlink_sendskb(info->notify_sock, info->notify_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 break;
833 }
834 /* after notification unregisters process */
Cedric Le Goatera03fcb72006-10-02 02:17:26 -0700835 put_pid(info->notify_owner);
Eric W. Biederman6f9ac6d2011-11-16 22:57:55 -0800836 put_user_ns(info->notify_user_ns);
Cedric Le Goatera03fcb72006-10-02 02:17:26 -0700837 info->notify_owner = NULL;
Eric W. Biederman6f9ac6d2011-11-16 22:57:55 -0800838 info->notify_user_ns = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839 }
840 wake_up(&info->wait_q);
841}
842
Arnd Bergmann21fc5382018-04-13 13:58:00 +0200843static int prepare_timeout(const struct __kernel_timespec __user *u_abs_timeout,
Deepa Dinamanib9047722017-08-02 19:51:11 -0700844 struct timespec64 *ts)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845{
Deepa Dinamanib9047722017-08-02 19:51:11 -0700846 if (get_timespec64(ts, u_abs_timeout))
Carsten Emde9ca7d8e2010-04-02 22:40:20 +0200847 return -EFAULT;
Deepa Dinamanib9047722017-08-02 19:51:11 -0700848 if (!timespec64_valid(ts))
Carsten Emde9ca7d8e2010-04-02 22:40:20 +0200849 return -EINVAL;
Carsten Emde9ca7d8e2010-04-02 22:40:20 +0200850 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851}
852
853static void remove_notification(struct mqueue_inode_info *info)
854{
Cedric Le Goatera03fcb72006-10-02 02:17:26 -0700855 if (info->notify_owner != NULL &&
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 info->notify.sigev_notify == SIGEV_THREAD) {
857 set_cookie(info->notify_cookie, NOTIFY_REMOVED);
Denis V. Lunev7ee015e2007-10-10 21:14:03 -0700858 netlink_sendskb(info->notify_sock, info->notify_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 }
Cedric Le Goatera03fcb72006-10-02 02:17:26 -0700860 put_pid(info->notify_owner);
Eric W. Biederman6f9ac6d2011-11-16 22:57:55 -0800861 put_user_ns(info->notify_user_ns);
Cedric Le Goatera03fcb72006-10-02 02:17:26 -0700862 info->notify_owner = NULL;
Eric W. Biederman6f9ac6d2011-11-16 22:57:55 -0800863 info->notify_user_ns = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864}
865
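/*
 * Editor's note (summary of the code below): resolve an mq_open() lookup
 * result. If the dentry is negative, create the queue when O_CREAT was
 * given (honouring a read-only mount), otherwise fail with -ENOENT. For
 * an existing queue, reject O_CREAT|O_EXCL and then check that the
 * caller has the access the oflag asks for.
 */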
static int prepare_open(struct dentry *dentry, int oflag, int ro,
			umode_t mode, struct filename *name,
			struct mq_attr *attr)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if (d_really_is_negative(dentry)) {
		if (!(oflag & O_CREAT))
			return -ENOENT;
		if (ro)
			return ro;
		audit_inode_parent_hidden(name, dentry->d_parent);
		return vfs_mkobj(dentry, mode & ~current_umask(),
				  mqueue_create_attr, attr);
	}
	/* it already existed */
	audit_inode(name, dentry, 0);
	if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
		return -EEXIST;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return -EINVAL;
	acc = oflag2acc[oflag & O_ACCMODE];
	return inode_permission(&nop_mnt_idmap, d_inode(dentry), acc);
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	struct filename *name;
	struct path path;
	int fd, error;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);
	error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
	if (!error) {
		struct file *file = dentry_open(&path, oflag, current_cred());
		if (!IS_ERR(file))
			fd_install(fd, file);
		else
			error = PTR_ERR(file);
	}
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;
	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(&nop_mnt_idmap, d_inode(dentry->d_parent),
				 dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

997/* Pipelined send and receive functions.
998 *
999 * If a receiver finds no waiting message, then it registers itself in the
1000 * list of waiting receivers. A sender checks that list before adding the new
1001 * message into the message array. If there is a waiting receiver, then it
1002 * bypasses the message array and directly hands the message over to the
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001003 * receiver. The receiver accepts the message and returns without grabbing the
1004 * queue spinlock:
1005 *
1006 * - Set pointer to message.
1007 * - Queue the receiver task for later wakeup (without the info->lock).
1008 * - Update its state to STATE_READY. Now the receiver can continue.
1009 * - Wake up the process after the lock is dropped. Should the process wake up
1010 * before this wakeup (due to a timeout or a signal) it will either see
1011 * STATE_READY and continue or acquire the lock to check the state again.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 *
1013 * The same algorithm is used for senders.
1014 */
1015
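/*
 * Editor's note (summary of the code below): complete a pipelined
 * operation by taking the waiter off the wait list and queueing it for
 * wakeup. The task reference is taken before the smp_store_release()
 * that publishes STATE_READY, and wake_q_add_safe() is used, exactly as
 * the MQ_BARRIER comment above requires.
 */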
static inline void __pipelined_op(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct ext_wait_queue *this)
{
	struct task_struct *task;

	list_del(&this->list);
	task = get_task_struct(this->task);

	/* see MQ_BARRIER for purpose/pairing */
	smp_store_release(&this->state, STATE_READY);
	wake_q_add_safe(wake_q, task);
}

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	__pipelined_op(wake_q, info, receiver);
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (there is guaranteed
 * to be one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	__pipelined_op(wake_q, info, sender);
}

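/*
 * Editor's note (summary of the code below): implementation backing the
 * mq_timedsend() syscall. It copies in the message and speculatively
 * allocates a spare tree node with GFP_KERNEL (so msg_insert() can avoid
 * a GFP_ATOMIC allocation under info->lock), then either hands the
 * message directly to a waiting receiver, inserts it into the rbtree, or
 * sleeps in wq_sleep() until there is room (or -EAGAIN with O_NONBLOCK).
 */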
Al Viro0d060602017-06-27 21:32:36 -04001060static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
1061 size_t msg_len, unsigned int msg_prio,
Deepa Dinamanib9047722017-08-02 19:51:11 -07001062 struct timespec64 *ts)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063{
Al Viro2903ff02012-08-28 12:52:22 -04001064 struct fd f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065 struct inode *inode;
1066 struct ext_wait_queue wait;
1067 struct ext_wait_queue *receiver;
1068 struct msg_msg *msg_ptr;
1069 struct mqueue_inode_info *info;
Carsten Emde9ca7d8e2010-04-02 22:40:20 +02001070 ktime_t expires, *timeout = NULL;
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001071 struct posix_msg_tree_node *new_leaf = NULL;
Al Viro2903ff02012-08-28 12:52:22 -04001072 int ret = 0;
Waiman Long194a6b52016-11-17 11:46:38 -05001073 DEFINE_WAKE_Q(wake_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074
1075 if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
1076 return -EINVAL;
1077
Al Viro0d060602017-06-27 21:32:36 -04001078 if (ts) {
Deepa Dinamanib9047722017-08-02 19:51:11 -07001079 expires = timespec64_to_ktime(*ts);
Al Viro0d060602017-06-27 21:32:36 -04001080 timeout = &expires;
1081 }
1082
1083 audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084
Al Viro2903ff02012-08-28 12:52:22 -04001085 f = fdget(mqdes);
1086 if (unlikely(!f.file)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001087 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088 goto out;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090
Al Viro496ad9a2013-01-23 17:07:38 -05001091 inode = file_inode(f.file);
Al Viro2903ff02012-08-28 12:52:22 -04001092 if (unlikely(f.file->f_op != &mqueue_file_operations)) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001093 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 goto out_fput;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001095 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096 info = MQUEUE_I(inode);
Al Viro9f45f5b2014-10-31 17:44:57 -04001097 audit_file(f.file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098
Al Viro2903ff02012-08-28 12:52:22 -04001099 if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001100 ret = -EBADF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 goto out_fput;
André Goddard Rosa8d8ffef2010-02-23 04:04:26 -03001102 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
1104 if (unlikely(msg_len > info->attr.mq_msgsize)) {
1105 ret = -EMSGSIZE;
1106 goto out_fput;
1107 }
1108
1109 /* First try to allocate memory, before doing anything with
1110 * existing queues. */
1111 msg_ptr = load_msg(u_msg_ptr, msg_len);
1112 if (IS_ERR(msg_ptr)) {
1113 ret = PTR_ERR(msg_ptr);
1114 goto out_fput;
1115 }
1116 msg_ptr->m_ts = msg_len;
1117 msg_ptr->m_type = msg_prio;
1118
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001119 /*
1120 * msg_insert really wants us to have a valid, spare node struct so
1121 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1122 * fall back to that if necessary.
1123 */
1124 if (!info->node_cache)
1125 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1126
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 spin_lock(&info->lock);
1128
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001129 if (!info->node_cache && new_leaf) {
1130 /* Save our speculative allocation into the cache */
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001131 INIT_LIST_HEAD(&new_leaf->msg_list);
1132 info->node_cache = new_leaf;
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001133 new_leaf = NULL;
1134 } else {
1135 kfree(new_leaf);
1136 }
1137
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
Al Viro2903ff02012-08-28 12:52:22 -04001139 if (f.file->f_flags & O_NONBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 ret = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 } else {
1142 wait.task = current;
1143 wait.msg = (void *) msg_ptr;
Manfred Spraulc5b2cbd2020-02-03 17:34:36 -08001144
1145 /* memory barrier not required, we hold info->lock */
1146 WRITE_ONCE(wait.state, STATE_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 ret = wq_sleep(info, SEND, timeout, &wait);
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001148 /*
1149 * wq_sleep must be called with info->lock held, and
1150 * returns with the lock released
1151 */
1152 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 } else {
1155 receiver = wq_get_first_waiter(info, RECV);
1156 if (receiver) {
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001157 pipelined_send(&wake_q, info, msg_ptr, receiver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 } else {
1159 /* adds message to the queue */
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001160 ret = msg_insert(msg_ptr, info);
1161 if (ret)
1162 goto out_unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 __do_notify(info);
1164 }
1165 inode->i_atime = inode->i_mtime = inode->i_ctime =
Deepa Dinamani078cd822016-09-14 07:48:04 -07001166 current_time(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 }
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001168out_unlock:
1169 spin_unlock(&info->lock);
Davidlohr Buesofa6004a2015-05-04 07:02:46 -07001170 wake_up_q(&wake_q);
Doug Ledfordce2d52c2012-05-31 16:26:38 -07001171out_free:
1172 if (ret)
1173 free_msg(msg_ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174out_fput:
Al Viro2903ff02012-08-28 12:52:22 -04001175 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176out:
1177 return ret;
1178}

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec64 *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec64_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* check that the receive buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;

			/* memory barrier not required, we hold info->lock */
			WRITE_ONCE(wait.state, STATE_NONE);
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}
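
/*
 * Example (illustrative userspace sketch, not part of this file): the
 * receive side. Note the -EMSGSIZE check above: the receive buffer must
 * be at least attr.mq_msgsize bytes, so a portable caller queries the
 * attributes first. The helper name and the five-second deadline are
 * assumptions made up for the sketch.
 *
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <time.h>
 *
 *	int example_receive(mqd_t mq)
 *	{
 *		struct mq_attr attr;
 *		struct timespec abs;
 *		unsigned int prio;
 *		char *buf;
 *		ssize_t n;
 *
 *		if (mq_getattr(mq, &attr) == -1)
 *			return -1;
 *		buf = malloc(attr.mq_msgsize);	// anything smaller gets EMSGSIZE
 *		if (!buf)
 *			return -1;
 *		clock_gettime(CLOCK_REALTIME, &abs);
 *		abs.tv_sec += 5;
 *		n = mq_timedreceive(mq, buf, attr.mq_msgsize, &prio, &abs);
 *		if (n >= 0)
 *			printf("got %zd bytes at priority %u\n", n, prio);
 *		free(buf);
 *		return n < 0 ? -1 : 0;
 *	}
 */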

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct __kernel_timespec __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
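
/*
 * Note (illustrative sketch follows): both wrappers take an *absolute*
 * CLOCK_REALTIME timeout, and prepare_timeout() rejects a timespec whose
 * tv_nsec falls outside [0, 1e9) or whose tv_sec is negative. A userspace
 * helper converting a relative delay must therefore normalize the
 * nanosecond carry itself; the helper name is an assumption.
 *
 *	#include <time.h>
 *
 *	static void abs_deadline(struct timespec *ts, long delay_ms)
 *	{
 *		clock_gettime(CLOCK_REALTIME, ts);
 *		ts->tv_sec += delay_ms / 1000;
 *		ts->tv_nsec += (delay_ms % 1000) * 1000000L;
 *		if (ts->tv_nsec >= 1000000000L) {	// carry into seconds
 *			ts->tv_sec++;
 *			ts->tv_nsec -= 1000000000L;
 *		}
 *	}
 */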

/*
 * Note: a request to deregister (with a NULL notification pointer) from a
 * task that is not the current owner of the notification is silently
 * ignored. POSIX does not explicitly define this case.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc)
				return -ENOMEM;

			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto free_skb;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			/* for SIGEV_THREAD, sigev_signo carries a netlink
			 * socket fd, not a signal number */
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				goto free_skb;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1) {
				sock = NULL;
				goto retry;
			}
			if (ret)
				return ret;
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			info->notify_self_exec_id = current->self_exec_id;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else
free_skb:
		dev_kfree_skb(nc);

	return ret;
}

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}
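
/*
 * Example (illustrative userspace sketch, not part of this file):
 * registering for notification with SIGEV_SIGNAL. Only one process may
 * own the registration at a time (a second registrant gets EBUSY per the
 * branch above), and the notification fires once, so the handler
 * typically drains the queue and re-arms. Signal choice and helper name
 * are assumptions made up for the sketch.
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	static int arm_notify(mqd_t mq)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGUSR1,
 *		};
 *		// passing NULL instead of &sev deregisters (owner only)
 *		return mq_notify(mq, &sev);
 *	}
 */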

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
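
/*
 * Example (illustrative userspace sketch, not part of this file): of the
 * mq_attr fields, only the O_NONBLOCK bit of mq_flags is mutable here
 * (anything else in mq_flags returns -EINVAL above; mq_maxmsg and
 * mq_msgsize are fixed at creation). Flipping the flag through the
 * mq_setattr(3) wrapper; the helper name is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	static int set_nonblock(mqd_t mq, int on)
 *	{
 *		struct mq_attr attr = { .mq_flags = on ? O_NONBLOCK : 0 };
 *
 *		// the previous attributes could be captured via the third
 *		// argument; NULL discards them
 *		return mq_setattr(mq, &attr, NULL);
 *	}
 */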

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags */
	compat_long_t mq_maxmsg;     /* maximum number of messages */
	compat_long_t mq_msgsize;    /* maximum message size */
	compat_long_t mq_curmsgs;    /* number of messages currently queued */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;
	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;
	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

#ifdef CONFIG_COMPAT_32BIT_TIME
static int compat_prepare_timeout(const struct old_timespec32 __user *p,
				   struct timespec64 *ts)
{
	if (get_old_timespec32(ts, p))
		return -EFAULT;
	if (!timespec64_valid(ts))
		return -EINVAL;
	return 0;
}

SYSCALL_DEFINE5(mq_timedsend_time32, mqd_t, mqdes,
		const char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int, msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive_time32, mqd_t, mqdes,
		char __user *, u_msg_ptr,
		unsigned int, msg_len, unsigned int __user *, u_msg_prio,
		const struct old_timespec32 __user *, u_abs_timeout)
{
	struct timespec64 ts, *p = NULL;
	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.free_inode = mqueue_free_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static const struct fs_context_operations mqueue_fs_context_ops = {
	.free = mqueue_fs_context_free,
	.get_tree = mqueue_get_tree,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.init_fs_context = mqueue_init_fs_context,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	struct vfsmount *m;

	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	m = mq_create_mount(ns);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ns->mq_mnt = m;
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	if (!setup_mq_sysctls(&init_ipc_ns)) {
		pr_warn("sysctl registration failed\n");
		error = -ENOMEM;
		goto out_kmem;
	}

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	retire_mq_sysctls(&init_ipc_ns);
out_kmem:
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);
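
/*
 * Example (illustrative userspace sketch, not part of this file): the
 * filesystem registered above is what typically backs /dev/mqueue.
 * Mounting it by hand from C, assuming the mount point exists and the
 * caller has CAP_SYS_ADMIN in its mount namespace:
 *
 *	#include <sys/mount.h>
 *
 *	int mount_mqueue(void)
 *	{
 *		// the source string is ignored by mqueue; "none" is customary
 *		return mount("none", "/dev/mqueue", "mqueue", 0, NULL);
 *	}
 */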