blob: 1a15105137397be158cc4a5357513b4e1b82bf97 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09009 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 */
21
Herbert Xueb4dea52008-12-29 23:04:08 -080022#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080038#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
Paolo Abeni0e219ae2019-05-03 17:01:37 +020042#include <linux/indirect_call_wrapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/tcp.h>
45#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030046#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080047#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/snmp.h>
57#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080058#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070059#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070060#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030061#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
Herbert Xucf80e0e2016-01-24 21:20:23 +080066#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080067#include <linux/scatterlist.h>
68
Song Liuc24b14c42017-10-23 09:20:24 -070069#include <trace/events/tcp.h>
70
Eric Dumazeta00e7442015-09-29 07:42:39 -070071static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070073 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
Stephen Hemminger3b401a82009-09-01 19:25:04 +000077static const struct inet_connection_sock_af_ops ipv6_mapped;
Mat Martineau35b2c322020-01-09 07:59:21 -080078const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080079#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000080static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090082#else
/* CONFIG_TCP_MD5SIG is disabled: no MD5 keys can exist on any socket,
 * so every lookup trivially fails.  Keeps callers free of #ifdefs.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return NULL;
}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Eric Dumazet93a77c12019-03-19 07:01:08 -070091/* Helper returning the inet6 address from a given tcp socket.
92 * It can be used in TCP stack instead of inet6_sk(sk).
93 * This avoids a dereference and allow compiler optimizations.
Eric Dumazetf5d54762019-04-01 03:09:20 -070094 * It is a specialized version of inet6_sk_generic().
Eric Dumazet93a77c12019-03-19 07:01:08 -070095 */
96static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
97{
Eric Dumazetf5d54762019-04-01 03:09:20 -070098 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
Eric Dumazet93a77c12019-03-19 07:01:08 -070099
Eric Dumazetf5d54762019-04-01 03:09:20 -0700100 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700101}
102
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000103static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104{
105 struct dst_entry *dst = skb_dst(skb);
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000106
Eric Dumazet5037e9e2015-12-14 14:08:53 -0800107 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -0700108 const struct rt6_info *rt = (const struct rt6_info *)dst;
109
Eric Dumazetca777ef2014-09-08 08:06:07 -0700110 sk->sk_rx_dst = dst;
111 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
Eric Dumazet93a77c12019-03-19 07:01:08 -0700112 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
Eric Dumazetca777ef2014-09-08 08:06:07 -0700113 }
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000114}
115
Eric Dumazet84b114b2017-05-05 06:56:54 -0700116static u32 tcp_v6_init_seq(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117{
Eric Dumazet84b114b2017-05-05 06:56:54 -0700118 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122}
123
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700124static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
Eric Dumazet84b114b2017-05-05 06:56:54 -0700125{
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700126 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700127 ipv6_hdr(skb)->saddr.s6_addr32);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128}
129
/* Pre-connect hook: run the attached BPF_CGROUP_INET6_CONNECT program
 * (which may inspect or rewrite @uaddr) before tcp_v6_connect() proper.
 * Returns 0 or a negative errno from the BPF program / validation.
 */
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	/* Caller must already hold the socket lock. */
	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}
144
/* Connect an IPv6 TCP socket to the address in @uaddr.
 *
 * Validates the sockaddr, resolves flow label and link-local scope,
 * rewrites the any-address to loopback (BSD'ism), delegates v4-mapped
 * destinations to tcp_v4_connect() with the af_ops switched to the mapped
 * variants, performs route lookup and source-address selection, and
 * finally sends the SYN via tcp_connect().
 *
 * Returns 0 on success or a negative errno; on failure after the state
 * transition the socket is put back in TCP_CLOSE with inet_dport and
 * sk_route_caps cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		/* User enabled flow-label sending: validate the requested
		 * label against the socket's flow-label leases.
		 */
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Destination changed: stale TS state from a previous peer must not
	 * leak into the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch this socket onto the v4-mapped operations before
		 * handing off to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Undo the af_ops switch so the socket is usable as
			 * a plain IPv6 socket again.
			 */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow key and look up a route for it. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		/* No bound source address: adopt the one routing picked. */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		/* Pick the ISN and timestamp offset, unless userspace
		 * pre-seeded write_seq (e.g. via repair mode).
		 */
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP Fastopen may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
347
Eric Dumazet563d34d2012-07-23 09:48:52 +0200348static void tcp_v6_mtu_reduced(struct sock *sk)
349{
350 struct dst_entry *dst;
351
352 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
353 return;
354
355 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
356 if (!dst)
357 return;
358
359 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
360 tcp_sync_mss(sk, dst_mtu(dst));
361 tcp_simple_retransmit(sk);
362 }
363}
364
/* ICMPv6 error handler for TCP.
 *
 * @skb carries the ICMPv6 message; the offending TCP header starts at
 * @offset into its data.  Looks up the established/new-syn-recv socket for
 * the embedded 4-tuple and dispatches on the error: NDISC redirects update
 * the route, PKT_TOOBIG triggers PMTU handling (deferred via sk_tsq_flags
 * if the socket is owned by user), and other errors are converted to errno
 * and reported to the socket.
 *
 * Returns 0 when a socket consumed the message, -ENOENT when none matched.
 */
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		/* Error is for a pending request sock, not a full socket. */
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG can be deferred (see below); other ICMPs are simply
	 * dropped when the socket is busy, so count them.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* IP_MINTTL-style protection against forged low-hop-limit ICMPs. */
	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* The quoted sequence must refer to data in flight, else the ICMP
	 * is stale or spoofed.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		/* Handle now if we own the socket, otherwise defer to
		 * tcp_release_cb() via TCP_MTU_REDUCED_DEFERRED (the extra
		 * sock_hold() pairs with the deferred processing).
		 */
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
498
499
/* Build and transmit a SYN-ACK for @req.
 *
 * Routes the request if @dst was not supplied, builds the SYN-ACK skb via
 * tcp_make_synack(), selects the traffic class (optionally reflecting the
 * client's SYN TOS per sysctl_tcp_reflect_tos, with the ECN bits masked
 * out), picks the tx options (per-request ones win over the listener's),
 * and sends it with ip6_xmit().  Returns a net_xmit_eval()-filtered errno,
 * or -ENOMEM if the skb could not be built.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the client's flow label if requested. */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
				tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
				np->tclass;

		/* Let a BPF congestion control that needs ECN mark the
		 * SYN-ACK ECT(0) if the tclass isn't ECN-capable already.
		 */
		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
			       tclass, sk->sk_priority);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
551
Octavian Purdila72659ec2010-01-17 19:09:39 -0800552
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700553static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554{
Huw Davies56ac42b2016-06-27 15:05:28 -0400555 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet634fb9792013-10-09 15:21:29 -0700556 kfree_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557}
558
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800559#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700560static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
David Aherndea53bb2019-12-30 14:14:28 -0800561 const struct in6_addr *addr,
562 int l3index)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800563{
David Aherndea53bb2019-12-30 14:14:28 -0800564 return tcp_md5_do_lookup(sk, l3index,
565 (union tcp_md5_addr *)addr, AF_INET6);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800566}
567
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700568static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700569 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800570{
David Aherndea53bb2019-12-30 14:14:28 -0800571 int l3index;
572
573 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
574 addr_sk->sk_bound_dev_if);
575 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
576 l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800577}
578
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler for IPv6 sockets.
 *
 * Copies a struct tcp_md5sig from @optval and adds (tcpm_keylen > 0) or
 * deletes (tcpm_keylen == 0) the MD5 key for the given peer.  For
 * TCP_MD5SIG_EXT, optional flags select an address prefix length and/or an
 * L3 master device (VRF) scope.  v4-mapped peers are stored under AF_INET
 * using the embedded IPv4 address.  Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	int l3index = 0;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		/* v4-mapped addresses only carry 32 meaningful bits. */
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		/* No explicit prefix: match the full address. */
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen) {
		/* Zero key length means delete the existing key. */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, l3index,
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
645
/* Feed the TCP-MD5 pseudo-header material into the hash state:
 * the IPv6 pseudo-header (RFC 2460-style: saddr, daddr, proto, length)
 * followed by a copy of the TCP header with its checksum field zeroed,
 * as required by RFC 2385. Returns the crypto_ahash_update() result
 * (0 on success, negative errno otherwise).
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	/* Build pseudo-header + header copy in the per-CPU scratch buffer */
	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. TCP header with checksum zeroed, placed right after the
	 * pseudo-header so one contiguous scatterlist entry covers both.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
David S. Millerc7da57a2007-10-26 00:41:21 -0700671
/* Compute the TCP-MD5 signature over pseudo-header + TCP header (including
 * options, th->doff << 2 bytes) + key, writing the 16-byte digest to
 * @md5_hash. Used for replies built from a header only (no full skb).
 * Returns 0 on success; on any failure the digest is zeroed and 1 is
 * returned. The md5sig pool is always released on every path.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;	/* pool unavailable: nothing to put */
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	/* NULL src scatterlist: final step only writes the digest out */
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);	/* never leave a partial digest behind */
	return 1;
}
703
/* Compute the TCP-MD5 signature for a full segment: pseudo-header (with
 * skb->len as the length), TCP header, payload data past the header
 * (tcp_md5_hash_skb_data), then the key. Addresses come from @sk when a
 * socket is supplied (valid for established/request sockets), otherwise
 * from the skb's IPv6 header. Returns 0 on success; 1 with a zeroed
 * digest on failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;	/* pool unavailable: nothing to put */
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	/* hash payload starting after the TCP header + options */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);	/* never leave a partial digest behind */
	return 1;
}
750
Eric Dumazetba8e2752015-10-02 11:43:28 -0700751#endif
752
/* Validate the TCP-MD5 option of an inbound segment against the key
 * configured on @sk (looked up by source address and L3 master index).
 * Returns true when the packet must be dropped: key expected but option
 * missing, option present but no key configured, or digest mismatch.
 * Returns false when the packet is acceptable (no key and no option, or
 * digest matches). Without CONFIG_TCP_MD5SIG this always accepts.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		/* key configured but peer sent no MD5 option: drop */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		/* unexpected MD5 option with no key configured: drop */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest), l3index);
		return true;
	}
#endif
	return false;
}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800803
/* Fill the IPv6-specific fields of a freshly-minted request sock from the
 * incoming SYN: remote/local addresses, the inbound interface for
 * link-local peers, and a reference to the skb when IPv6 packet options
 * must be replayed to the child socket later.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN skb (extra reference) if the listener asked for any
	 * of the per-packet IPv6 ancillary data, so the options can be
	 * handed to the child. Skipped for TIME_WAIT-recycled ISNs.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
829
/* route_req callback of tcp_request_sock_ipv6_ops: initialize the IPv6
 * request sock fields, run the LSM connection-request hook, then resolve
 * the route for the SYN-ACK. Returns the dst entry, or NULL when security
 * policy rejects the request.
 */
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req)
{
	tcp_v6_init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
842
/* Generic request_sock operations for IPv6 TCP: wires the protocol-family
 * callbacks (SYN-ACK retransmit, ACK/RST replies, destructor) used while
 * a connection sits in the SYN queue.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
852
/* TCP-specific request_sock callbacks for IPv6: MSS clamp derived from
 * the IPv6 minimum MTU, optional MD5 and SYN-cookie hooks, routing,
 * ISN/timestamp-offset generation and SYN-ACK transmission.
 */
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800868
/* Build and transmit a stateless reply segment (RST when @rst, otherwise
 * a bare ACK) in the reverse direction of @skb, using the per-netns IPv6
 * control socket. Optionally carries a timestamp option (when @tsecr) and
 * a TCP-MD5 signature (when @key). @sk may be NULL (no originating
 * socket), a full socket, or a timewait socket; it only influences mark,
 * tx hash, timestamp and uid selection.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label, u32 priority)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	/* Header length grows with the options we will emit */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;	/* best effort: silently drop on allocation failure */

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* RSTs answering an ACK carry no ACK of their own */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* sign with reversed addresses: we are replying */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Reverse the flow of the incoming packet */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		/* prefer the L3 master device of the ingress interface
		 * when no explicit oif was given
		 */
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT) {
			mark = inet_twsk(sk)->tw_mark;
			/* autoflowlabel relies on buff->hash */
			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
				     PKT_HASH_TYPE_L4);
		} else {
			mark = sk->sk_mark;
		}
		buff->tstamp = tcp_transmit_time(sk);
	}
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* route lookup failed: free the reply we built */
	kfree_skb(buff);
}
986
/* Send a RST in response to @skb. @sk may be NULL (no matching socket);
 * in that case only unicast destinations are answered. When MD5 is
 * configured, the RST is signed; for an unknown connection the listener
 * is looked up by the packet's ports so the peer's signed segment can be
 * validated first — an unverifiable signed segment gets no RST at all.
 * Sequence numbers follow RFC 793: echo the ACK as our SEQ, or ACK the
 * exact extent of the offending segment.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	int oif = 0;

	/* never answer a RST with a RST */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	/* RCU section protects the md5 key (and sk1) until the response
	 * has been built
	 */
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
	} else if (hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net,
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		/* ACK everything the segment occupied, SYN/FIN included */
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

			trace_tcp_send_reset(sk, skb);
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
			priority = sk->sk_priority;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
		}
	} else {
		/* no socket: optionally reflect the incoming flow label */
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
			     ipv6_get_dsfield(ipv6h), label, priority);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
1093
/* Thin wrapper around tcp_v6_send_response() for stateless ACKs
 * (rst == 0); used by the timewait and request-sock ACK paths below.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label, u32 priority)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label, priority);
}
1102
/* Answer a segment hitting a TIME_WAIT socket with an ACK built from the
 * timewait state (snd_nxt/rcv_nxt, scaled window, stored timestamps,
 * md5 key, tclass/flowlabel/priority). Drops the timewait reference
 * taken by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);

	inet_twsk_put(tw);
}
1116
/* ACK on behalf of a request sock (SYN_RECV), covering both the regular
 * and Fast Open cases; sequence and window come from the request, the
 * md5 key is looked up against the peer address and L3 master index.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
}
1141
1142
/* Validate a possible SYN-cookie ACK: non-SYN segments reaching a
 * listener are checked by cookie_v6_check(), which may return the child
 * socket reconstructed from the cookie. Without CONFIG_SYN_COOKIES this
 * simply passes @sk through unchanged.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1153
/* Generate a SYN-cookie ISN for @sk from the given headers, storing it in
 * *@cookie and returning the clamped MSS; returns 0 when cookies are
 * disabled or no MSS could be derived. Marks the listener's SYN queue as
 * overflowed so subsequent ACKs are cookie-checked.
 */
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}
1168
/* Handle an incoming SYN on an IPv6 listener. v4-mapped traffic (skb
 * protocol ETH_P_IP) is delegated to the IPv4 path; non-unicast
 * destinations are dropped without a reset. Otherwise runs the generic
 * tcp_conn_request() with the IPv6 request-sock ops.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
1184
/* Restore the IPv6 control-block layout inside skb->cb after TCP has
 * overlaid its own (TCP_SKB_CB stores the inet6 parm at an offset).
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1194
/* tcp_v6_syn_recv_sock() - build the child (connected) socket once a
 * passive open completes on an IPv6 listener.
 *
 * @sk:		the listening socket
 * @skb:	the segment (final ACK, or SYN in the Fast Open / cloned-dst
 *		paths) that completes the handshake
 * @req:	the request_sock describing the pending connection
 * @dst:	optional pre-computed route; looked up here when NULL
 * @req_unhash:	request to remove from the ehash on success
 * @own_req:	set by inet_ehash_nolisten() - true if we won the race to
 *		insert the child socket
 *
 * Returns the new child socket, or NULL on failure (the listener's drop
 * counter is bumped via tcp_listendrop()).
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped: an IPv4 peer connected to a v6 socket.
		 * Let the IPv4 code create the child, then repoint the
		 * af_ops/backlog handlers at the mapped variants.
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newinet = inet_sk(newsk);
		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* memcpy() above copied the listener's pointers; clear every
		 * pointer field so the child never frees/uses parent state.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addressing/interface taken from the request, not the listener */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECN bits are masked out so reflection cannot forge ECN state.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	/* Sentinel IPv4 addresses for a pure-IPv6 socket */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, l3index, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1419
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Returns 0 on success; a non-zero return from the state machine is
 * reported by sending a reset and still returning 0 (skb consumed).
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		/* NOTE(review): plain load here, while tcp_v6_early_demux()
		 * reads sk->sk_rx_dst with READ_ONCE() - confirm whether an
		 * annotation is wanted on this lockless access too.
		 */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Drop the cached route if the ingress device or the
			 * dst cookie no longer validates it.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			/* Latch the new options skb; the old one (if any)
			 * falls through to the kfree_skb() below.
			 */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1555
/* Populate TCP_SKB_CB() from the IPv6 and TCP headers.
 *
 * Must run after xfrm6_policy_check(): the IP6CB data is relocated into
 * the TCP control block, so IP6CB() must not be used afterwards (see
 * tcp_v6_restore_cb() for the inverse operation).
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq accounts for SYN/FIN each consuming one sequence number */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
1579
/* tcp_v6_rcv() - entry point for every IPv6 TCP segment (softirq context).
 *
 * Validates the header and checksum, looks the segment's socket up in
 * the established/listener hashes, and dispatches on socket state:
 * TIME_WAIT and NEW_SYN_RECV are handled inline; otherwise the segment
 * is processed directly (socket not owned by user) or backlogged.
 *
 * Returns 0 when the skb was consumed, -1 when tcp_v6_do_rcv() failed.
 */
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
	struct sk_buff *skb_to_free;
	int sdif = inet6_sdif(skb);
	int dif = inet6_iif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated the header - reload */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			/* Listener went away; drop the req and re-lookup */
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			/* tcp_filter() may have trimmed/reallocated - reload */
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* tcp_filter() may have changed the skb - reload header pointers */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v6_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	/* Free the cached skb outside the socket lock */
	if (skb_to_free)
		__kfree_skb(skb_to_free);
put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* Acceptable new SYN: retire the timewait socket and hand
		 * the segment to a matching listener, if one exists.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1799
Paolo Abeni97ff7ff2019-05-03 17:01:38 +02001800INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
Eric Dumazetc7109982012-07-26 12:18:11 +00001801{
1802 const struct ipv6hdr *hdr;
1803 const struct tcphdr *th;
1804 struct sock *sk;
1805
1806 if (skb->pkt_type != PACKET_HOST)
1807 return;
1808
1809 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1810 return;
1811
1812 hdr = ipv6_hdr(skb);
1813 th = tcp_hdr(skb);
1814
1815 if (th->doff < sizeof(struct tcphdr) / 4)
1816 return;
1817
Eric Dumazet870c3152014-10-17 09:17:20 -07001818 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001819 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1820 &hdr->saddr, th->source,
1821 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001822 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001823 if (sk) {
1824 skb->sk = sk;
1825 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001826 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001827 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001828
Eric Dumazetc7109982012-07-26 12:18:11 +00001829 if (dst)
Eric Dumazet93a77c12019-03-19 07:01:08 -07001830 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001831 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001832 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001833 skb_dst_set_noref(skb, dst);
1834 }
1835 }
1836}
1837
/* TIME_WAIT socket ops for IPv6 TCP: object size plus the shared
 * uniqueness/destructor hooks from the common TCP code.
 */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1843
Eric Dumazetdd2e0b82020-06-19 12:12:35 -07001844INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1845{
1846 struct ipv6_pinfo *np = inet6_sk(sk);
1847
1848 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
1849}
1850
Mat Martineau35b2c322020-01-09 07:59:21 -08001851const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001852 .queue_xmit = inet6_csk_xmit,
1853 .send_check = tcp_v6_send_check,
1854 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001855 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001856 .conn_request = tcp_v6_conn_request,
1857 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001858 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001859 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001860 .setsockopt = ipv6_setsockopt,
1861 .getsockopt = ipv6_getsockopt,
1862 .addr2sockaddr = inet6_csk_addr2sockaddr,
1863 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001864 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865};
1866
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 signature operations for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001874
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875/*
1876 * TCP over IPv4 via INET6 API
1877 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001878static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001879 .queue_xmit = ip_queue_xmit,
1880 .send_check = tcp_v4_send_check,
1881 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001882 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001883 .conn_request = tcp_v6_conn_request,
1884 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001885 .net_header_len = sizeof(struct iphdr),
1886 .setsockopt = ipv6_setsockopt,
1887 .getsockopt = ipv6_getsockopt,
1888 .addr2sockaddr = inet6_csk_addr2sockaddr,
1889 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001890 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891};
1892
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 signature operations for v4-mapped-on-v6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001900
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901/* NOTE: A lot of things set to zero explicitly by call to
1902 * sk_alloc() so need not be done here.
1903 */
1904static int tcp_v6_init_sock(struct sock *sk)
1905{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001906 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
Neal Cardwell900f65d2012-04-19 09:55:21 +00001908 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001910 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001912#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001913 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001914#endif
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 return 0;
1917}
1918
/* Tear down a socket: run the shared TCP destructor first, then
 * release the IPv6-specific state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1924
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001925#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001927static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001928 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001930 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001931 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1932 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
1934 if (ttd < 0)
1935 ttd = 0;
1936
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 seq_printf(seq,
1938 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001939 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 i,
1941 src->s6_addr32[0], src->s6_addr32[1],
1942 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001943 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 dest->s6_addr32[0], dest->s6_addr32[1],
1945 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001946 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001948 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001949 1, /* timers active (only the expire timer) */
1950 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a4f2012-10-27 23:16:46 +00001951 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001952 from_kuid_munged(seq_user_ns(seq),
1953 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001954 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 0, /* open_requests have no inode */
1956 0, req);
1957}
1958
1959static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1960{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001961 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 __u16 destp, srcp;
1963 int timer_active;
1964 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001965 const struct inet_sock *inet = inet_sk(sp);
1966 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001967 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001968 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001969 int rx_queue;
1970 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
Eric Dumazetefe42082013-10-03 15:42:29 -07001972 dest = &sp->sk_v6_daddr;
1973 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001974 destp = ntohs(inet->inet_dport);
1975 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001976
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001977 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001978 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001979 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001981 timer_expires = icsk->icsk_timeout;
1982 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001984 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 } else if (timer_pending(&sp->sk_timer)) {
1986 timer_active = 2;
1987 timer_expires = sp->sk_timer.expires;
1988 } else {
1989 timer_active = 0;
1990 timer_expires = jiffies;
1991 }
1992
Yafang Shao986ffdf2017-12-20 11:12:52 +08001993 state = inet_sk_state_load(sp);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001994 if (state == TCP_LISTEN)
Eric Dumazet288efe82019-11-05 14:11:53 -08001995 rx_queue = READ_ONCE(sp->sk_ack_backlog);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001996 else
1997 /* Because we don't lock the socket,
1998 * we might find a transient negative value.
1999 */
Eric Dumazetdba7d9b2019-10-10 20:17:39 -07002000 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
Eric Dumazet7db48e92019-10-10 20:17:40 -07002001 READ_ONCE(tp->copied_seq), 0);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 seq_printf(seq,
2004 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02002005 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 i,
2007 src->s6_addr32[0], src->s6_addr32[1],
2008 src->s6_addr32[2], src->s6_addr32[3], srcp,
2009 dest->s6_addr32[0], dest->s6_addr32[1],
2010 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002011 state,
Eric Dumazet0f317462019-10-10 20:17:41 -07002012 READ_ONCE(tp->write_seq) - tp->snd_una,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002013 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00002015 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002016 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06002017 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002018 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 sock_i_ino(sp),
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002020 refcount_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07002021 jiffies_to_clock_t(icsk->icsk_rto),
2022 jiffies_to_clock_t(icsk->icsk_ack.ato),
Wei Wang31954cd2019-01-25 10:53:19 -08002023 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07002024 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002025 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07002026 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07002027 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 );
2029}
2030
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002031static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002032 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033{
Eric Dumazet789f5582015-04-12 18:51:09 -07002034 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00002035 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
Eric Dumazetefe42082013-10-03 15:42:29 -07002038 dest = &tw->tw_v6_daddr;
2039 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 destp = ntohs(tw->tw_dport);
2041 srcp = ntohs(tw->tw_sport);
2042
2043 seq_printf(seq,
2044 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002045 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 i,
2047 src->s6_addr32[0], src->s6_addr32[1],
2048 src->s6_addr32[2], src->s6_addr32[3], srcp,
2049 dest->s6_addr32[0], dest->s6_addr32[1],
2050 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2051 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00002052 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002053 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054}
2055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056static int tcp6_seq_show(struct seq_file *seq, void *v)
2057{
2058 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002059 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 if (v == SEQ_START_TOKEN) {
2062 seq_puts(seq,
2063 " sl "
2064 "local_address "
2065 "remote_address "
2066 "st tx_queue rx_queue tr tm->when retrnsmt"
2067 " uid timeout inode\n");
2068 goto out;
2069 }
2070 st = seq->private;
2071
Eric Dumazet079096f2015-10-02 11:43:32 -07002072 if (sk->sk_state == TCP_TIME_WAIT)
2073 get_timewait6_sock(seq, v, st->num);
2074 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002075 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07002076 else
2077 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078out:
2079 return 0;
2080}
2081
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002082static const struct seq_operations tcp6_seq_ops = {
2083 .show = tcp6_seq_show,
2084 .start = tcp_seq_start,
2085 .next = tcp_seq_next,
2086 .stop = tcp_seq_stop,
2087};
2088
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091};
2092
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002093int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Christoph Hellwigc3506372018-04-10 19:42:55 +02002095 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2096 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002097 return -ENOMEM;
2098 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099}
2100
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002101void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002103 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105#endif
2106
2107struct proto tcpv6_prot = {
2108 .name = "TCPv6",
2109 .owner = THIS_MODULE,
2110 .close = tcp_close,
Andrey Ignatovd74bad42018-03-30 15:08:05 -07002111 .pre_connect = tcp_v6_pre_connect,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 .connect = tcp_v6_connect,
2113 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002114 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 .ioctl = tcp_ioctl,
2116 .init = tcp_v6_init_sock,
2117 .destroy = tcp_v6_destroy_sock,
2118 .shutdown = tcp_shutdown,
2119 .setsockopt = tcp_setsockopt,
2120 .getsockopt = tcp_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01002121 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00002123 .sendmsg = tcp_sendmsg,
2124 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002126 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05002127 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08002128 .unhash = inet_unhash,
2129 .get_port = inet_csk_get_port,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazet06044752017-06-07 13:29:12 -07002131 .leave_memory_pressure = tcp_leave_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07002132 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 .sockets_allocated = &tcp_sockets_allocated,
2134 .memory_allocated = &tcp_memory_allocated,
2135 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07002136 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07002137 .sysctl_mem = sysctl_tcp_mem,
Eric Dumazet356d1832017-11-07 00:29:28 -08002138 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2139 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 .max_header = MAX_TCP_HEADER,
2141 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08002142 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002143 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002144 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07002145 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00002146 .no_autobind = true,
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09002147 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148};
Vinay Kumar Yadav6abde0b2020-06-02 00:07:05 +05302149EXPORT_SYMBOL_GPL(tcpv6_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
David Aherna8e3bb32017-08-28 15:14:20 -07002151/* thinking of making this const? Don't.
2152 * early_demux can change based on sysctl.
2153 */
Julia Lawall39294c32017-08-01 18:27:28 +02002154static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00002155 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06002156 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 .handler = tcp_v6_rcv,
2158 .err_handler = tcp_v6_err,
2159 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2160};
2161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162static struct inet_protosw tcpv6_protosw = {
2163 .type = SOCK_STREAM,
2164 .protocol = IPPROTO_TCP,
2165 .prot = &tcpv6_prot,
2166 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002167 .flags = INET_PROTOSW_PERMANENT |
2168 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169};
2170
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002171static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002172{
Denis V. Lunev56772422008-04-03 14:28:30 -07002173 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2174 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002175}
2176
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002177static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002178{
Denis V. Lunev56772422008-04-03 14:28:30 -07002179 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002180}
2181
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002182static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002183{
Haishuang Yan1946e672016-12-28 17:52:32 +08002184 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002185}
2186
2187static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002188 .init = tcpv6_net_init,
2189 .exit = tcpv6_net_exit,
2190 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002191};
2192
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002193int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002195 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002196
Vlad Yasevich33362882012-11-15 08:49:15 +00002197 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2198 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002199 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002200
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002201 /* register inet6 protocol */
2202 ret = inet6_register_protosw(&tcpv6_protosw);
2203 if (ret)
2204 goto out_tcpv6_protocol;
2205
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002206 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002207 if (ret)
2208 goto out_tcpv6_protosw;
Mat Martineauf870fa02020-01-21 16:56:15 -08002209
2210 ret = mptcpv6_init();
2211 if (ret)
2212 goto out_tcpv6_pernet_subsys;
2213
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002214out:
2215 return ret;
2216
Mat Martineauf870fa02020-01-21 16:56:15 -08002217out_tcpv6_pernet_subsys:
2218 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002219out_tcpv6_protosw:
2220 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002221out_tcpv6_protocol:
2222 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002223 goto out;
2224}
2225
Daniel Lezcano09f77092007-12-13 05:34:58 -08002226void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002227{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002228 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002229 inet6_unregister_protosw(&tcpv6_protosw);
2230 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231}