/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 *		Definitions for request_sock
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 */
#ifndef _REQUEST_SOCK_H
#define _REQUEST_SOCK_H

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>

#include <net/sock.h>

struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;

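/*
 * Per-protocol operations for handling connection requests: the slab the
 * requests are allocated from, SYNACK (re)transmission, ACK/RST generation
 * and teardown.  Each protocol supplies its own instance (TCP uses
 * tcp_request_sock_ops, for example).
 */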
struct request_sock_ops {
	int		family;
	unsigned int	obj_size;
	struct kmem_cache	*slab;
	char		*slab_name;
	int		(*rtx_syn_ack)(const struct sock *sk,
				       struct request_sock *req);
	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req);
	void		(*send_reset)(const struct sock *sk,
				      struct sk_buff *skb);
	void		(*destructor)(struct request_sock *req);
	void		(*syn_ack_timeout)(const struct request_sock *req);
};

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);

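/*
 * Headers of the SYN that created a request, kept when the listener has
 * TCP_SAVE_SYN enabled.  The data[] flexible array is assumed to hold the
 * saved MAC (when requested), network and TCP headers back to back, with
 * the *_hdrlen fields recording their lengths.
 */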
struct saved_syn {
	u32 mac_hdrlen;
	u32 network_hdrlen;
	u32 tcp_hdrlen;
	u8 data[];
};

/* struct request_sock - mini sock to represent a connection request
 */
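/*
 * Note: a request_sock starts with a struct sock_common, so it can be cast
 * to and from struct sock (see req_to_sk()/inet_reqsk() below) and, while
 * the handshake completes, live in the same lookup tables as full sockets.
 * The rsk_* macros below simply alias the shared sock_common fields.
 */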
struct request_sock {
	struct sock_common		__req_common;
#define rsk_refcnt			__req_common.skc_refcnt
#define rsk_hash			__req_common.skc_hash
#define rsk_listener			__req_common.skc_listener
#define rsk_window_clamp		__req_common.skc_window_clamp
#define rsk_rcv_wnd			__req_common.skc_rcv_wnd

	struct request_sock		*dl_next;
	u16				mss;
	u8				num_retrans; /* number of retransmits */
	u8				syncookie:1; /* syncookie: encode tcpopts in timestamp */
	u8				num_timeout:7; /* number of timeouts */
	u32				ts_recent;
	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
{
	return (struct request_sock *)sk;
}

static inline struct sock *req_to_sk(struct request_sock *req)
{
	return (struct sock *)req;
}

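/*
 * Allocate a request_sock from the per-protocol slab cache.  With
 * attach_listener set, a reference on sk_listener is taken and stored in
 * rsk_listener; the allocation fails if the listener is already being torn
 * down.  rsk_refcnt is left at zero here: the caller is expected to set it
 * once the request is fully initialised and published.
 */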
static inline struct request_sock *
reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	    bool attach_listener)
{
	struct request_sock *req;

	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!req)
		return NULL;
	req->rsk_listener = NULL;
	if (attach_listener) {
		if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
			kmem_cache_free(ops->slab, req);
			return NULL;
		}
		req->rsk_listener = sk_listener;
	}
	req->rsk_ops = ops;
	req_to_sk(req)->sk_prot = sk_listener->sk_prot;
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
	refcount_set(&req->rsk_refcnt, 0);

	return req;
}

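/*
 * __reqsk_free() releases everything the request owns: it runs the
 * per-protocol destructor, drops the listener reference (if any), frees the
 * saved SYN headers and returns the object to its slab.  reqsk_free()
 * additionally warns if references are still outstanding, while reqsk_put()
 * drops one reference and frees the request once the count hits zero.
 */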
static inline void __reqsk_free(struct request_sock *req)
{
	req->rsk_ops->destructor(req);
	if (req->rsk_listener)
		sock_put(req->rsk_listener);
	kfree(req->saved_syn);
	kmem_cache_free(req->rsk_ops->slab, req);
}

static inline void reqsk_free(struct request_sock *req)
{
	WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
	__reqsk_free(req);
}

static inline void reqsk_put(struct request_sock *req)
{
	if (refcount_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
}

/*
 * For a TCP Fast Open listener -
 *	lock - protects the access to all the reqsk, which is co-owned by
 *		the listener and the child socket.
 *	qlen - pending TFO requests (still in TCP_SYN_RECV).
 *	max_qlen - max TFO reqs allowed before TFO is disabled.
 *
 *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
 *	structure above. But there is some implementation difficulty due to
 *	listen_sock being part of request_sock_queue hence will be freed when
 *	a listener is stopped. But TFO related fields may continue to be
 *	accessed even after a listener is closed, until its sk_refcnt drops
 *	to 0 implying no more outstanding TFO reqs. One solution is to keep
 *	listen_opt around until sk_refcnt drops to 0. But there is some other
 *	complexity that needs to be resolved. E.g., a listener can be disabled
 *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
 */
struct fastopen_queue {
	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
						 * This is part of the defense
						 * against spoofing attack.
						 */
	spinlock_t	lock;
	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
	int		max_qlen;	/* != 0 iff TFO is currently enabled */

	struct tcp_fastopen_context __rcu *ctx; /* cipher context for cookie */
};
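
/*
 * The rskq_rst list above is what allows the Fast Open code to notice a
 * burst of RSTs against outstanding TFO requests (the spoofing defence
 * mentioned in the field comments) and temporarily disable TFO on the
 * listener.
 */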

/** struct request_sock_queue - queue of request_socks
 *
 * @rskq_accept_head - FIFO head of established children
 * @rskq_accept_tail - FIFO tail of established children
 * @rskq_defer_accept - User waits for some data after accept()
 *
 */
struct request_sock_queue {
	spinlock_t		rskq_lock;
	u8			rskq_defer_accept;

	u32			synflood_warned;
	atomic_t		qlen;
	atomic_t		young;

	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
					     * if TFO is enabled.
					     */
};

void reqsk_queue_alloc(struct request_sock_queue *queue);

void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset);

static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
{
	return READ_ONCE(queue->rskq_accept_head) == NULL;
}

static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
						      struct sock *parent)
{
	struct request_sock *req;

	spin_lock_bh(&queue->rskq_lock);
	req = queue->rskq_accept_head;
	if (req) {
		sk_acceptq_removed(parent);
		WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
		if (queue->rskq_accept_head == NULL)
			queue->rskq_accept_tail = NULL;
	}
	spin_unlock_bh(&queue->rskq_lock);
	return req;
}

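/*
 * Backlog accounting: reqsk_queue_added() bumps both counters, while
 * reqsk_queue_removed() only decrements "young" for requests that never
 * needed a SYNACK retransmission (num_timeout == 0).  "young" therefore
 * tracks how many pending requests are still fresh, which the listener
 * code can consult when deciding whether to drop new SYNs under load.
 */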
static inline void reqsk_queue_removed(struct request_sock_queue *queue,
				       const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_dec(&queue->young);
	atomic_dec(&queue->qlen);
}

static inline void reqsk_queue_added(struct request_sock_queue *queue)
{
	atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->qlen);
}

static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
	return atomic_read(&queue->young);
}

#endif /* _REQUEST_SOCK_H */