// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>

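/* Report the address the socket is bound to, if any, as UNIX_DIAG_NAME.
 * Only the sun_path payload is copied; ->addr is read locklessly, the
 * acquire load below pairing with the release store done at bind time.
 */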
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have a hash table lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME,
		       addr->len - offsetof(struct sockaddr_un, sun_path),
		       addr->name->sun_path);
}

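/* For a filesystem-bound socket, report the backing inode number and
 * device of the bound dentry as UNIX_DIAG_VFS.
 */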
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

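/* Report the inode number of the connected peer socket, if any, as
 * UNIX_DIAG_PEER.
 */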
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

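/* For a listening socket, report the peer inode of every pending
 * (not yet accepted) connection as a u32 array in UNIX_DIAG_ICONS.
 * An embryonic socket without a peer is reported as 0.
 */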
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

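/* Fill UNIX_DIAG_RQLEN: for listeners, the current and maximum accept
 * backlog; for other sockets, the byte counts of the incoming and
 * outgoing queues.
 */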
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (sk->sk_state == TCP_LISTEN) {
		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

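/* Report the socket owner's UID, mapped into the requesting socket's
 * user namespace, as UNIX_DIAG_UID.
 */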
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
			    struct user_namespace *user_ns)
{
	uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

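/* Build one complete unix_diag_msg for @sk: the fixed header plus every
 * attribute requested via req->udiag_show.  If the skb runs out of room,
 * the partially built message is cancelled and -EMSGSIZE is returned.
 */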
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb, user_ns))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

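/* Snapshot the inode number under the state lock and skip sockets for
 * which sock_i_ino() reports no inode (no struct socket attached).
 */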
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}

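/* Netlink dump callback: walk all hash buckets and emit one record per
 * socket whose state matches req->udiag_states.  cb->args[] holds the
 * slot and in-slot position so the dump can resume where it left off.
 */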
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct unix_diag_req *req;

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		spin_lock(&net->unx.table.locks[slot]);
		sk_for_each(sk, &net->unx.table.buckets[slot]) {
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0) {
				spin_unlock(&net->unx.table.locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&net->unx.table.locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

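/* Linear scan of the hash table for the socket with the given inode
 * number.  Returns the socket with a reference held, or NULL.
 */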
static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock(&net->unx.table.locks[i]);
		sk_for_each(sk, &net->unx.table.buckets[i]) {
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&net->unx.table.locks[i]);
				return sk;
			}
		}
		spin_unlock(&net->unx.table.locks[i]);
	}
	return NULL;
}

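/* Answer a request for one specific socket, identified by inode number
 * and (optionally) cookie.  The reply skb is grown in 256-byte steps,
 * up to a page, until sk_diag_fill() fits.
 */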
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	struct net *net = sock_net(in_skb->sk);
	unsigned int extra_len;
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(net, req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

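/* Entry point from the sock_diag core: start a netlink dump for
 * NLM_F_DUMP requests, otherwise answer an exact-socket query.
 */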
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UNIX socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);