// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
	/* inet_sock has to be the first member of l2tp_ip_sock */
	struct inet_sock	inet;

	u32			conn_id;
	u32			peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
	return (struct l2tp_ip_sock *)sk;
}

static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
					  __be32 raddr, int dif, u32 tunnel_id)
{
	struct sock *sk;

	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
		const struct inet_sock *inet = inet_sk(sk);

		if (!net_eq(sock_net(sk), net))
			continue;

		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
			continue;

		if (inet->inet_rcv_saddr && laddr &&
		    inet->inet_rcv_saddr != laddr)
			continue;

		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
			continue;

		if (l2tp->conn_id != tunnel_id)
			continue;

		goto found;
	}

	sk = NULL;
found:
	return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
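
/* Worked example (illustrative byte values only, not taken from a real
 * capture): after the IP header, a control packet starts with
 *
 *   00 00 00 00  c8 03 ...
 *
 * i.e. 32 bits of zeros followed by T=1, L=1, S=1, Ver=3 and the length
 * field, while a data packet for session ID 0x12345678 starts with
 *
 *   12 34 56 78  <cookie, if configured>  <payload>
 *
 * so l2tp_ip_recv() below only needs the first four bytes to tell the
 * two cases apart.
 */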
static int l2tp_ip_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	struct iphdr *iph;
	int length;

	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;
	session_id = ntohl(*((__be32 *)ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is an L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session. */
	session = l2tp_session_get(net, session_id);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto discard_sess;

		/* Point to L2TP header */
		optr = skb->data;
		ptr = skb->data;
		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}

	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto discard_sess;

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
	l2tp_session_dec_refcount(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
	iph = (struct iphdr *)skb_network_header(skb);

	read_lock_bh(&l2tp_ip_lock);
	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
				   tunnel_id);
	if (!sk) {
		read_unlock_bh(&l2tp_ip_lock);
		goto discard;
	}
	sock_hold(sk);
	read_unlock_bh(&l2tp_ip_lock);

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset_ct(skb);

	return sk_receive_skb(sk, skb, 1);

discard_sess:
	l2tp_session_dec_refcount(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}

static int l2tp_ip_hash(struct sock *sk)
{
	if (sk_unhashed(sk)) {
		write_lock_bh(&l2tp_ip_lock);
		sk_add_node(sk, &l2tp_ip_table);
		write_unlock_bh(&l2tp_ip_lock);
	}
	return 0;
}

static void l2tp_ip_unhash(struct sock *sk)
{
	if (sk_unhashed(sk))
		return;
	write_lock_bh(&l2tp_ip_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);
}

static int l2tp_ip_open(struct sock *sk)
{
	/* Prevent autobind. We don't have ports. */
	inet_sk(sk)->inet_num = IPPROTO_L2TP;

	l2tp_ip_hash(sk);
	return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);
	sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2tp_tunnel *tunnel = sk->sk_user_data;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	if (tunnel)
		l2tp_tunnel_delete(tunnel);
}

static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
	struct net *net = sock_net(sk);
	int ret;
	int chk_addr_ret;

	if (addr_len < sizeof(struct sockaddr_l2tpip))
		return -EINVAL;
	if (addr->l2tp_family != AF_INET)
		return -EINVAL;

	lock_sock(sk);

	ret = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
	ret = -EADDRNOTAVAIL;
	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
		goto out;

	if (addr->l2tp_addr.s_addr) {
		inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
		inet->inet_saddr = addr->l2tp_addr.s_addr;
	}
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	write_lock_bh(&l2tp_ip_lock);
	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip_lock);
		ret = -EADDRINUSE;
		goto out;
	}

	sk_dst_reset(sk);
	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);

	ret = 0;
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return ret;
}

static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
		return -EINVAL;

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	write_unlock_bh(&l2tp_ip_lock);

out_sk:
	release_sock(sk);

	return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
	if (sock_flag(sk, SOCK_ZAPPED))
		return 0;

	return __udp_disconnect(sk, flags);
}

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
			   int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

	memset(lsa, 0, sizeof(*lsa));
	lsa->l2tp_family = AF_INET;
	if (peer) {
		if (!inet->inet_dport)
			return -ENOTCONN;
		lsa->l2tp_conn_id = lsk->peer_conn_id;
		lsa->l2tp_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;

		if (!addr)
			addr = inet->inet_saddr;
		lsa->l2tp_conn_id = lsk->conn_id;
		lsa->l2tp_addr.s_addr = addr;
	}
	return sizeof(*lsa);
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	/* Charge it to the socket, dropping if the queue is full. */
	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0)
		goto drop;

	return 0;

drop:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
	kfree_skb(skb);
	return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
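
/* A minimal userspace sketch of that flow (assuming the uapi
 * struct sockaddr_l2tpip from <linux/l2tp.h>; the addresses and
 * connection IDs below are made-up example values):
 *
 *   int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *
 *   struct sockaddr_l2tpip local = {
 *           .l2tp_family  = AF_INET,
 *           .l2tp_addr    = { .s_addr = htonl(INADDR_ANY) },
 *           .l2tp_conn_id = 1,      // local tunnel (connection) ID
 *   };
 *   bind(fd, (struct sockaddr *)&local, sizeof(local));  // no autobind
 *
 *   struct sockaddr_l2tpip peer = {
 *           .l2tp_family  = AF_INET,
 *           .l2tp_addr    = { .s_addr = inet_addr("192.0.2.1") },
 *           .l2tp_conn_id = 2,      // peer's tunnel (connection) ID
 *   };
 *   connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 *   send(fd, buf, buf_len, 0);  // buf holds a raw L2TPv3 control message
 *
 * l2tp_ip_sendmsg() prepends the zero session ID before handing the
 * frame to IP.
 */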
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int rc;
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = NULL;
	struct flowi4 *fl4;
	int connected = 0;
	__be32 daddr;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	/* Get and verify the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(*lip))
			goto out;

		if (lip->l2tp_family != AF_INET) {
			rc = -EAFNOSUPPORT;
			if (lip->l2tp_family != AF_UNSPEC)
				goto out;
		}

		daddr = lip->l2tp_addr.s_addr;
	} else {
		rc = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		daddr = inet->inet_daddr;
		connected = 1;
	}

	/* Allocate a socket buffer */
	rc = -ENOMEM;
	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
			   4 + len, 0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers, putting IP header on 4-byte boundary. */
	skb_reserve(skb, 2 + NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Insert 0 session_id */
	*((__be32 *)skb_put(skb, 4)) = 0;

	/* Copy user data into skb */
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		goto error;
	}

	fl4 = &inet->cork.fl.u.ip4;
	if (connected)
		rt = (struct rtable *)__sk_dst_check(sk, 0);

	rcu_read_lock();
	if (!rt) {
		const struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference(inet->inet_opt);

		/* Use correct destination address if we have options. */
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport, inet->inet_sport,
					   sk->sk_protocol, RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		if (connected) {
			sk_setup_caps(sk, &rt->dst);
		} else {
			skb_dst_set(skb, &rt->dst);
			goto xmit;
		}
	}

	/* We don't need to clone dst here, it is guaranteed to not disappear.
	 * __dev_xmit_skb() might force a refcount if needed.
	 */
	skb_dst_set_noref(skb, &rt->dst);

xmit:
	/* Queue the packet to IP for output */
	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
	rcu_read_unlock();

error:
	if (rc >= 0)
		rc = len;

out:
	release_sock(sk);
	return rc;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	rc = -EHOSTUNREACH;
	goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
done:
	skb_free_datagram(sk, skb);
out:
	return err ? err : copied;
}

int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sk_buff *skb;
	int amount;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		break;
	case SIOCINQ:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL_GPL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
	.name		   = "L2TP/IP",
	.owner		   = THIS_MODULE,
	.init		   = l2tp_ip_open,
	.close		   = l2tp_ip_close,
	.bind		   = l2tp_ip_bind,
	.connect	   = l2tp_ip_connect,
	.disconnect	   = l2tp_ip_disconnect,
	.ioctl		   = l2tp_ioctl,
	.destroy	   = l2tp_ip_destroy_sock,
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.sendmsg	   = l2tp_ip_sendmsg,
	.recvmsg	   = l2tp_ip_recvmsg,
	.backlog_rcv	   = l2tp_ip_backlog_recv,
	.hash		   = l2tp_ip_hash,
	.unhash		   = l2tp_ip_unhash,
	.obj_size	   = sizeof(struct l2tp_ip_sock),
};

static const struct proto_ops l2tp_ip_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = l2tp_ip_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
};

static struct inet_protosw l2tp_ip_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip_prot,
	.ops		= &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
	.handler	= l2tp_ip_recv,
	.netns_ok	= 1,
};

static int __init l2tp_ip_init(void)
{
	int err;

	pr_info("L2TP IP encapsulation support (L2TPv3)\n");

	err = proto_register(&l2tp_ip_prot, 1);
	if (err != 0)
		goto out;

	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	if (err)
		goto out1;

	inet_register_protosw(&l2tp_ip_protosw);
	return 0;

out1:
	proto_unregister(&l2tp_ip_prot);
out:
	return err;
}

static void __exit l2tp_ip_exit(void)
{
	inet_unregister_protosw(&l2tp_ip_protosw);
	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);