// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/tls.h>
#include <net/addrconf.h>
#include <net/secure_seq.h>

#include "chtls.h"
#include "chtls_cm.h"
#include "clip_tbl.h"

/*
 * State transitions and actions for close.  Note that if we are in SYN_SENT
 * we remain in that state as we cannot control a connection while it's in
 * SYN_SENT; such connections are allowed to establish and are then aborted.
 */
static unsigned char new_state[16] = {
	/* current state:     new state:      action: */
	/* (Invalid)       */ TCP_CLOSE,
	/* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_SYN_SENT    */ TCP_SYN_SENT,
	/* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	/* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
	/* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
	/* TCP_TIME_WAIT   */ TCP_CLOSE,
	/* TCP_CLOSE       */ TCP_CLOSE,
	/* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
	/* TCP_LAST_ACK    */ TCP_LAST_ACK,
	/* TCP_LISTEN      */ TCP_CLOSE,
	/* TCP_CLOSING     */ TCP_CLOSING,
};

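/*
 * Allocate and initialise an offload socket (chtls_sock) for @cdev,
 * including the cached control skb used for work requests and the default
 * TLS hardware-state fields.  Returns NULL on allocation failure.
 */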
static struct chtls_sock *chtls_sock_create(struct chtls_dev *cdev)
{
	struct chtls_sock *csk = kzalloc(sizeof(*csk), GFP_ATOMIC);

	if (!csk)
		return NULL;

	csk->txdata_skb_cache = alloc_skb(TXDATA_SKB_LEN, GFP_ATOMIC);
	if (!csk->txdata_skb_cache) {
		kfree(csk);
		return NULL;
	}

	kref_init(&csk->kref);
	csk->cdev = cdev;
	skb_queue_head_init(&csk->txq);
	csk->wr_skb_head = NULL;
	csk->wr_skb_tail = NULL;
	csk->mss = MAX_MSS;
	csk->tlshws.ofld = 1;
	csk->tlshws.txkey = -1;
	csk->tlshws.rxkey = -1;
	csk->tlshws.mfs = TLS_MFS;
	skb_queue_head_init(&csk->tlshws.sk_recv_queue);
	return csk;
}

static void chtls_sock_release(struct kref *ref)
{
	struct chtls_sock *csk =
		container_of(ref, struct chtls_sock, kref);

	kfree(csk);
}

static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
					    struct sock *sk)
{
	struct net_device *ndev = cdev->ports[0];
#if IS_ENABLED(CONFIG_IPV6)
	struct net_device *temp;
	int addr_type;
#endif

	switch (sk->sk_family) {
	case PF_INET:
		if (likely(!inet_sk(sk)->inet_rcv_saddr))
			return ndev;
		ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (likely(addr_type == IPV6_ADDR_ANY))
			return ndev;

		for_each_netdev_rcu(&init_net, temp) {
			if (ipv6_chk_addr(&init_net, (struct in6_addr *)
					  &sk->sk_v6_rcv_saddr, temp, 1)) {
				ndev = temp;
				break;
			}
		}
		break;
#endif
	default:
		return NULL;
	}

	if (!ndev)
		return NULL;

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);
	return ndev;
}

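/*
 * Transcribe the TCP option word reported by the hardware into the
 * in-kernel tcp_sock: MSS clamp, timestamps and window scaling, adjusting
 * the TCP header length when timestamps are in use.
 */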
static void assign_rxopt(struct sock *sk, unsigned int opt)
{
	const struct chtls_dev *cdev;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	cdev = csk->cdev;
	tp->tcp_header_len = sizeof(struct tcphdr);
	tp->rx_opt.mss_clamp = cdev->mtus[TCPOPT_MSS_G(opt)] - 40;
	tp->mss_cache = tp->rx_opt.mss_clamp;
	tp->rx_opt.tstamp_ok = TCPOPT_TSTAMP_G(opt);
	tp->rx_opt.snd_wscale = TCPOPT_SACK_G(opt);
	tp->rx_opt.wscale_ok = TCPOPT_WSCALE_OK_G(opt);
	SND_WSCALE(tp) = TCPOPT_SND_WSCALE_G(opt);
	if (!tp->rx_opt.wscale_ok)
		tp->rx_opt.rcv_wscale = 0;
	if (tp->rx_opt.tstamp_ok) {
		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
		tp->rx_opt.mss_clamp -= TCPOLEN_TSTAMP_ALIGNED;
	} else if (csk->opt2 & TSTAMPS_EN_F) {
		csk->opt2 &= ~TSTAMPS_EN_F;
		csk->mtu_idx = TCPOPT_MSS_G(opt);
	}
}

static void chtls_purge_receive_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		skb_dst_set(skb, (void *)NULL);
		kfree_skb(skb);
	}
}

static void chtls_purge_write_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->txq))) {
		sk->sk_wmem_queued -= skb->truesize;
		__kfree_skb(skb);
	}
}

static void chtls_purge_recv_queue(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *tlsk = &csk->tlshws;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tlsk->sk_recv_queue)) != NULL) {
		skb_dst_set(skb, NULL);
		kfree_skb(skb);
	}
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);
	struct chtls_dev *cdev;

	cdev = (struct chtls_dev *)handle;
	req->cmd = CPL_ABORT_NO_RST;
	cxgb4_ofld_send(cdev->lldi->ports[0], skb);
}

static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len)
{
	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
		__skb_trim(skb, 0);
		refcount_add(2, &skb->users);
	} else {
		skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	}
	return skb;
}

static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct cpl_abort_req *req;
	struct chtls_sock *csk;
	struct tcp_sock *tp;

	csk = rcu_dereference_sk_user_data(sk);
	tp = tcp_sk(sk);

	if (!skb)
		skb = alloc_ctrl_skb(csk->txdata_skb_cache, sizeof(*req));

	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_ABORT_REQ, csk->tid);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
	req->rsvd0 = htonl(tp->snd_nxt);
	req->rsvd1 = !csk_flag_nochk(csk, CSK_TX_DATA_SENT);
	req->cmd = mode;
	t4_set_arp_err_handler(skb, csk->cdev, abort_arp_failure);
	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
}

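/*
 * Send an ABORT_REQ to reset the connection, making sure a flow-control
 * work request has gone out first and that the offload write queue is
 * purged.  Connections still in SYN_RECV are only flagged for reset.
 */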
static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	if (unlikely(csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) ||
		     !csk->cdev)) {
		if (sk->sk_state == TCP_SYN_RECV)
			csk_set_flag(csk, CSK_RST_ABORTED);
		goto out;
	}

	if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
			WARN_ONCE(1, "send tx flowc error");
		csk_set_flag(csk, CSK_TX_DATA_SENT);
	}

	csk_set_flag(csk, CSK_ABORT_RPL_PENDING);
	chtls_purge_write_queue(sk);

	csk_set_flag(csk, CSK_ABORT_SHUTDOWN);
	if (sk->sk_state != TCP_SYN_RECV)
		chtls_send_abort(sk, mode, skb);
	else
		goto out;

	return;
out:
	kfree_skb(skb);
}

static void release_tcp_port(struct sock *sk)
{
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);
}

static void tcp_uncork(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->nonagle & TCP_NAGLE_CORK) {
		tp->nonagle &= ~TCP_NAGLE_CORK;
		chtls_tcp_push(sk, 0);
	}
}

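/*
 * Queue a CPL_CLOSE_CON_REQ work request for the connection's TID and push
 * any pending frames so the close follows data already queued for transmit.
 */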
static void chtls_close_conn(struct sock *sk)
{
	struct cpl_close_con_req *req;
	struct chtls_sock *csk;
	struct sk_buff *skb;
	unsigned int tid;
	unsigned int len;

	len = roundup(sizeof(struct cpl_close_con_req), 16);
	csk = rcu_dereference_sk_user_data(sk);
	tid = csk->tid;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);
	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_TP_WR) |
			      FW_WR_IMMDLEN_V(sizeof(*req) -
					      sizeof(req->wr)));
	req->wr.wr_mid = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)) |
			       FW_WR_FLOWID_V(tid));

	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));

	tcp_uncork(sk);
	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
	if (sk->sk_state != TCP_SYN_SENT)
		chtls_push_frames(csk, 1);
}

/*
 * Perform a state transition during close and return the actions indicated
 * for the transition.  Do not make this function inline, the main reason
 * it exists at all is to avoid multiple inlining of tcp_set_state.
 */
static int make_close_transition(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];

	tcp_set_state(sk, next & TCP_STATE_MASK);
	return next & TCP_ACTION_FIN;
}

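/*
 * Offload replacement for tcp_close().  Purges the receive queues, aborts
 * the connection if data would be lost, otherwise performs the normal close
 * transition and, once the socket is orphaned, destroys it if it has
 * already reached TCP_CLOSE.
 */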
void chtls_close(struct sock *sk, long timeout)
{
	int data_lost, prev_state;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);

	lock_sock(sk);
	sk->sk_shutdown |= SHUTDOWN_MASK;

	data_lost = skb_queue_len(&sk->sk_receive_queue);
	data_lost |= skb_queue_len(&csk->tlshws.sk_recv_queue);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);

	if (sk->sk_state == TCP_CLOSE) {
		goto wait;
	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		release_tcp_port(sk);
		goto unlock;
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		sk->sk_prot->disconnect(sk, 0);
	} else if (make_close_transition(sk)) {
		chtls_close_conn(sk);
	}
wait:
	if (timeout)
		sk_stream_wait_close(sk, timeout);

unlock:
	prev_state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	release_sock(sk);

	local_bh_disable();
	bh_lock_sock(sk);

	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC);
		if (skb)
			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

/*
 * Wait until a socket enters one of the given states.
 */
static int wait_for_states(struct sock *sk, unsigned int states)
{
	DECLARE_WAITQUEUE(wait, current);
	struct socket_wq _sk_wq;
	long current_timeo;
	int err = 0;

	current_timeo = 200;

	/*
	 * We want this to work even when there's no associated struct socket.
	 * In that case we provide a temporary wait_queue_head_t.
	 */
	if (!sk->sk_wq) {
		init_waitqueue_head(&_sk_wq.wait);
		_sk_wq.fasync_list = NULL;
		init_rcu_head_on_stack(&_sk_wq.rcu);
		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
	}

	add_wait_queue(sk_sleep(sk), &wait);
	while (!sk_in_state(sk, states)) {
		if (!current_timeo) {
			err = -EBUSY;
			break;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(current_timeo);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		release_sock(sk);
		if (!sk_in_state(sk, states))
			current_timeo = schedule_timeout(current_timeo);
		__set_current_state(TASK_RUNNING);
		lock_sock(sk);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
		sk->sk_wq = NULL;
	return err;
}

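/*
 * Offload disconnect: drop everything queued in both directions, reset the
 * hardware connection if it is still open, then fall back to
 * tcp_disconnect() for the generic socket teardown.
 */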
int chtls_disconnect(struct sock *sk, int flags)
{
	struct tcp_sock *tp;
	int err;

	tp = tcp_sk(sk);
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	chtls_purge_write_queue(sk);

	if (sk->sk_state != TCP_CLOSE) {
		sk->sk_err = ECONNRESET;
		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
		err = wait_for_states(sk, TCPF_CLOSE);
		if (err)
			return err;
	}
	chtls_purge_recv_queue(sk);
	chtls_purge_receive_queue(sk);
	tp->max_window = 0xFFFF << (tp->rx_opt.snd_wscale);
	return tcp_disconnect(sk, flags);
}

#define SHUTDOWN_ELIGIBLE_STATE (TCPF_ESTABLISHED | \
				 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)
void chtls_shutdown(struct sock *sk, int how)
{
	if ((how & SEND_SHUTDOWN) &&
	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
	    make_close_transition(sk))
		chtls_close_conn(sk);
}

void chtls_destroy_sock(struct sock *sk)
{
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(sk);
	chtls_purge_recv_queue(sk);
	csk->ulp_mode = ULP_MODE_NONE;
	chtls_purge_write_queue(sk);
	free_tls_keyid(sk);
	kref_put(&csk->kref, chtls_sock_release);
	csk->cdev = NULL;
	if (sk->sk_family == AF_INET)
		sk->sk_prot = &tcp_prot;
#if IS_ENABLED(CONFIG_IPV6)
	else
		sk->sk_prot = &tcpv6_prot;
#endif
	sk->sk_prot->destroy(sk);
}

static void reset_listen_child(struct sock *child)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(child);
	struct sk_buff *skb;

	skb = alloc_ctrl_skb(csk->txdata_skb_cache,
			     sizeof(struct cpl_abort_req));

	chtls_send_reset(child, CPL_ABORT_SEND_RST, skb);
	sock_orphan(child);
	INC_ORPHAN_COUNT(child);
	if (child->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(child);
}

static void chtls_disconnect_acceptq(struct sock *listen_sk)
{
	struct request_sock **pprev;

	pprev = ACCEPT_QUEUE(listen_sk);
	while (*pprev) {
		struct request_sock *req = *pprev;

		if (req->rsk_ops == &chtls_rsk_ops ||
		    req->rsk_ops == &chtls_rsk_opsv6) {
			struct sock *child = req->sk;

			*pprev = req->dl_next;
			sk_acceptq_removed(listen_sk);
			reqsk_put(req);
			sock_hold(child);
			local_bh_disable();
			bh_lock_sock(child);
			release_tcp_port(child);
			reset_listen_child(child);
			bh_unlock_sock(child);
			local_bh_enable();
			sock_put(child);
		} else {
			pprev = &req->dl_next;
		}
	}
}

static int listen_hashfn(const struct sock *sk)
{
	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
}

static struct listen_info *listen_hash_add(struct chtls_dev *cdev,
					   struct sock *sk,
					   unsigned int stid)
{
	struct listen_info *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int key = listen_hashfn(sk);

		p->sk = sk;
		p->stid = stid;
		spin_lock(&cdev->listen_lock);
		p->next = cdev->listen_hash_tab[key];
		cdev->listen_hash_tab[key] = p;
		spin_unlock(&cdev->listen_lock);
	}
	return p;
}

static int listen_hash_find(struct chtls_dev *cdev,
			    struct sock *sk)
{
	struct listen_info *p;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);

	spin_lock(&cdev->listen_lock);
	for (p = cdev->listen_hash_tab[key]; p; p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static int listen_hash_del(struct chtls_dev *cdev,
			   struct sock *sk)
{
	struct listen_info *p, **prev;
	int stid = -1;
	int key;

	key = listen_hashfn(sk);
	prev = &cdev->listen_hash_tab[key];

	spin_lock(&cdev->listen_lock);
	for (p = *prev; p; prev = &p->next, p = p->next)
		if (p->sk == sk) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	spin_unlock(&cdev->listen_lock);
	return stid;
}

static void cleanup_syn_rcv_conn(struct sock *child, struct sock *parent)
{
	struct request_sock *req;
	struct chtls_sock *csk;

	csk = rcu_dereference_sk_user_data(child);
	req = csk->passive_reap_next;

	reqsk_queue_removed(&inet_csk(parent)->icsk_accept_queue, req);
	__skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq);
	chtls_reqsk_free(req);
	csk->passive_reap_next = NULL;
}

static void chtls_reset_synq(struct listen_ctx *listen_ctx)
{
	struct sock *listen_sk = listen_ctx->lsk;

	while (!skb_queue_empty(&listen_ctx->synq)) {
		struct chtls_sock *csk =
			container_of((struct synq *)__skb_dequeue
				(&listen_ctx->synq), struct chtls_sock, synq);
		struct sock *child = csk->sk;

		cleanup_syn_rcv_conn(child, listen_sk);
		sock_hold(child);
		local_bh_disable();
		bh_lock_sock(child);
		release_tcp_port(child);
		reset_listen_child(child);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);
	}
}

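/*
 * Start hardware listening for an offloaded listening socket: pick the
 * egress netdev, allocate a server TID (stid), record it in the listen
 * hash table and program the adapter via cxgb4_create_server{,6}().
 */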
int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
{
	struct net_device *ndev;
#if IS_ENABLED(CONFIG_IPV6)
	bool clip_valid = false;
#endif
	struct listen_ctx *ctx;
	struct adapter *adap;
	struct port_info *pi;
	int ret = 0;
	int stid;

	rcu_read_lock();
	ndev = chtls_find_netdev(cdev, sk);
	rcu_read_unlock();
	if (!ndev)
		return -EBADF;

	pi = netdev_priv(ndev);
	adap = pi->adapter;
	if (!(adap->flags & CXGB4_FULL_INIT_DONE))
		return -EBADF;

	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
		return -EADDRINUSE;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	ctx->lsk = sk;
	ctx->cdev = cdev;
	ctx->state = T4_LISTEN_START_PENDING;
	skb_queue_head_init(&ctx->synq);

	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
	if (stid < 0)
		goto free_ctx;

	sock_hold(sk);
	if (!listen_hash_add(cdev, sk, stid))
		goto free_stid;

	if (sk->sk_family == PF_INET) {
		ret = cxgb4_create_server(ndev, stid,
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_sport, 0,
					  cdev->lldi->rxq_ids[0]);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		int addr_type;

		addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY) {
			ret = cxgb4_clip_get(ndev, (const u32 *)
					     &sk->sk_v6_rcv_saddr, 1);
			if (ret)
				goto del_hash;
			clip_valid = true;
		}
		ret = cxgb4_create_server6(ndev, stid,
					   &sk->sk_v6_rcv_saddr,
					   inet_sk(sk)->inet_sport,
					   cdev->lldi->rxq_ids[0]);
#endif
	}
	if (ret > 0)
		ret = net_xmit_errno(ret);
	if (ret)
		goto del_hash;
	return 0;
del_hash:
#if IS_ENABLED(CONFIG_IPV6)
	if (clip_valid)
		cxgb4_clip_release(ndev, (const u32 *)&sk->sk_v6_rcv_saddr, 1);
#endif
	listen_hash_del(cdev, sk);
free_stid:
	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
	sock_put(sk);
free_ctx:
	kfree(ctx);
	module_put(THIS_MODULE);
	return -EBADF;
}

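/*
 * Tear down a hardware listener: drop the stid from the listen hash, reset
 * any embryonic connections on the SYN queue, remove the server from the
 * adapter and release the IPv6 CLIP entry if one was claimed.
 */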
void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
{
	struct listen_ctx *listen_ctx;
	int stid;

	stid = listen_hash_del(cdev, sk);
	if (stid < 0)
		return;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	chtls_reset_synq(listen_ctx);

	cxgb4_remove_server(cdev->lldi->ports[0], stid,
			    cdev->lldi->rxq_ids[0], sk->sk_family == PF_INET6);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct chtls_sock *csk;
		int addr_type = 0;

		csk = rcu_dereference_sk_user_data(sk);
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sk->sk_v6_rcv_saddr);
		if (addr_type != IPV6_ADDR_ANY)
			cxgb4_clip_release(csk->egress_dev, (const u32 *)
					   &sk->sk_v6_rcv_saddr, 1);
	}
#endif
	chtls_disconnect_acceptq(sk);
}

static int chtls_pass_open_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb) + RSS_HDR;
	unsigned int stid = GET_TID(rpl);
	struct listen_ctx *listen_ctx;

	listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid);
	if (!listen_ctx)
		return CPL_RET_BUF_DONE;

	if (listen_ctx->state == T4_LISTEN_START_PENDING) {
		listen_ctx->state = T4_LISTEN_STARTED;
		return CPL_RET_BUF_DONE;
	}

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}
	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}

static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb) + RSS_HDR;
	struct listen_ctx *listen_ctx;
	unsigned int stid;
	void *data;

	stid = GET_TID(rpl);
	data = lookup_stid(cdev->tids, stid);
	listen_ctx = (struct listen_ctx *)data;

	if (rpl->status != CPL_ERR_NONE) {
		pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
			rpl->status, stid);
		return CPL_RET_BUF_DONE;
	}

	cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
	sock_put(listen_ctx->lsk);
	kfree(listen_ctx);
	module_put(THIS_MODULE);

	return 0;
}

static void chtls_purge_wr_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = dequeue_wr(sk)) != NULL)
		kfree_skb(skb);
}

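/*
 * Release per-connection hardware resources: the cached control skb, any
 * outstanding work-request skbs, the L2T entry and, unless the socket is
 * still in TCP_SYN_SENT, the hardware TID itself.
 */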
static void chtls_release_resources(struct sock *sk)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_dev *cdev = csk->cdev;
	unsigned int tid = csk->tid;
	struct tid_info *tids;

	if (!cdev)
		return;

	tids = cdev->tids;
	kfree_skb(csk->txdata_skb_cache);
	csk->txdata_skb_cache = NULL;

	if (csk->wr_credits != csk->wr_max_credits) {
		chtls_purge_wr_queue(sk);
		chtls_reset_wr_list(csk);
	}

	if (csk->l2t_entry) {
		cxgb4_l2t_release(csk->l2t_entry);
		csk->l2t_entry = NULL;
	}

	if (sk->sk_state != TCP_SYN_SENT) {
		cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
		sock_put(sk);
	}
}

static void chtls_conn_done(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		chtls_purge_receive_queue(sk);
	sk_wakeup_sleepers(sk, 0);
	tcp_done(sk);
}

static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
{
	/*
	 * If the server is still open we clean up the child connection,
	 * otherwise the server already did the clean up as it was purging
	 * its SYN queue and the skb was just sitting in its backlog.
	 */
	if (likely(parent->sk_state == TCP_LISTEN)) {
		cleanup_syn_rcv_conn(child, parent);
		/* Without the below call to sock_orphan,
		 * we leak the socket resource with syn_flood test
		 * as inet_csk_destroy_sock will not be called
		 * in tcp_done since SOCK_DEAD flag is not set.
		 * Kernel handles this differently where new socket is
		 * created only after 3 way handshake is done.
		 */
		sock_orphan(child);
		percpu_counter_inc((child)->sk_prot->orphan_count);
		chtls_release_resources(child);
		chtls_conn_done(child);
	} else {
		if (csk_flag(child, CSK_RST_ABORTED)) {
			chtls_release_resources(child);
			chtls_conn_done(child);
		}
	}
}

static void pass_open_abort(struct sock *child, struct sock *parent,
			    struct sk_buff *skb)
{
	do_abort_syn_rcv(child, parent);
	kfree_skb(skb);
}

static void bl_pass_open_abort(struct sock *lsk, struct sk_buff *skb)
{
	pass_open_abort(skb->sk, lsk, skb);
}

static void chtls_pass_open_arp_failure(struct sock *sk,
					struct sk_buff *skb)
{
	const struct request_sock *oreq;
	struct chtls_sock *csk;
	struct chtls_dev *cdev;
	struct sock *parent;
	void *data;

	csk = rcu_dereference_sk_user_data(sk);
	cdev = csk->cdev;

	/*
	 * If the connection is being aborted due to the parent listening
	 * socket going away there's nothing to do, the ABORT_REQ will close
	 * the connection.
	 */
	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
		kfree_skb(skb);
		return;
	}

	oreq = csk->passive_reap_next;
	data = lookup_stid(cdev->tids, oreq->ts_recent);
	parent = ((struct listen_ctx *)data)->lsk;

	bh_lock_sock(parent);
	if (!sock_owned_by_user(parent)) {
		pass_open_abort(sk, parent, skb);
	} else {
		BLOG_SKB_CB(skb)->backlog_rcv = bl_pass_open_abort;
		__sk_add_backlog(parent, skb);
	}
	bh_unlock_sock(parent);
}

static void chtls_accept_rpl_arp_failure(void *handle,
					 struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)handle;

	sock_hold(sk);
	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
	sock_put(sk);
}

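/*
 * Compute the advertised MSS for a passive open from the path MTU, the
 * peer's MSS option and the adapter MTU table, and return the matching MTU
 * index to program into the hardware.
 */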
static unsigned int chtls_select_mss(const struct chtls_sock *csk,
				     unsigned int pmtu,
				     struct cpl_pass_accept_req *req)
{
	struct chtls_dev *cdev;
	struct dst_entry *dst;
	unsigned int tcpoptsz;
	unsigned int iphdrsz;
	unsigned int mtu_idx;
	struct tcp_sock *tp;
	unsigned int mss;
	struct sock *sk;

	mss = ntohs(req->tcpopt.mss);
	sk = csk->sk;
	dst = __sk_dst_get(sk);
	cdev = csk->cdev;
	tp = tcp_sk(sk);
	tcpoptsz = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		iphdrsz = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
	else
#endif
		iphdrsz = sizeof(struct iphdr) + sizeof(struct tcphdr);
	if (req->tcpopt.tstamp)
		tcpoptsz += round_up(TCPOLEN_TIMESTAMP, 4);

	tp->advmss = dst_metric_advmss(dst);
	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
		tp->advmss = USER_MSS(tp);
	if (tp->advmss > pmtu - iphdrsz)
		tp->advmss = pmtu - iphdrsz;
	if (mss && tp->advmss > mss)
		tp->advmss = mss;

	tp->advmss = cxgb4_best_aligned_mtu(cdev->lldi->mtus,
					    iphdrsz + tcpoptsz,
					    tp->advmss - tcpoptsz,
					    8, &mtu_idx);
	tp->advmss -= iphdrsz;

	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
	return mtu_idx;
}

static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;
	if (win_clamp && win_clamp < space)
		space = win_clamp;

	if (wscale_ok) {
		while (wscale < 14 && (65535 << wscale) < space)
			wscale++;
	}
	return wscale;
}

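/*
 * Build and send the CPL_PASS_ACCEPT_RPL that commits the passive open to
 * hardware: opt0/opt2 carry the negotiated window scale, MSS index, L2T
 * entry, TLS ULP mode and the ECN/congestion-control settings for the new
 * connection.
 */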
static void chtls_pass_accept_rpl(struct sk_buff *skb,
				  struct cpl_pass_accept_req *req,
				  unsigned int tid)

{
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi;
	const struct tcphdr *tcph;
	const struct tcp_sock *tp;
	struct chtls_sock *csk;
	unsigned int len;
	struct sock *sk;
	u32 opt2, hlen;
	u64 opt0;

	sk = skb->sk;
	tp = tcp_sk(sk);
	csk = sk->sk_user_data;
	csk->tid = tid;
	lldi = csk->cdev->lldi;
	len = roundup(sizeof(*rpl5), 16);

	rpl5 = __skb_put_zero(skb, len);
	INIT_TP_WR(rpl5, tid);

	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
	       TX_CHAN_V(csk->tx_chan) |
	       SMAC_SEL_V(csk->smac_idx) |
	       DSCP_V(csk->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TLS) |
	       RCV_BUFSIZ_V(min(tp->rcv_wnd >> 10, RCV_BUFSIZ_M));

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;
	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	hlen = ntohl(req->hdr_len);

	tcph = (struct tcphdr *)((u8 *)(req + 1) +
			T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));
	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
	opt2 |= T5_ISS_F;
	opt2 |= T5_OPT_2_VALID_F;
	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
	cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}

static void inet_inherit_port(struct inet_hashinfo *hash_info,
			      struct sock *lsk, struct sock *newsk)
{
	local_bh_disable();
	__inet_inherit_port(lsk, newsk);
	local_bh_enable();
}

static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol) {
		kfree_skb(skb);
		return 0;
	}
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}

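/*
 * Create the child socket for an incoming offloaded connection: clone the
 * listener via tcp_create_openreq_child(), resolve the route and neighbour
 * for the peer (IPv4 or IPv6), bind a chtls_sock and an L2T entry to it and
 * fill in the addressing and queue parameters the hardware will use.
 */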
static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
				    const struct cpl_pass_accept_req *req,
				    struct chtls_dev *cdev)
{
	struct neighbour *n = NULL;
	struct inet_sock *newinet;
	const struct iphdr *iph;
	struct tls_context *ctx;
	struct net_device *ndev;
	struct chtls_sock *csk;
	struct dst_entry *dst;
	struct tcp_sock *tp;
	struct sock *newsk;
	u16 port_id;
	int rxq_idx;
	int step;

	iph = (const struct iphdr *)network_hdr;
	newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
	if (!newsk)
		goto free_oreq;

	if (lsk->sk_family == AF_INET) {
		dst = inet_csk_route_child_sock(lsk, newsk, oreq);
		if (!dst)
			goto free_sk;

		n = dst_neigh_lookup(dst, &iph->saddr);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		const struct ipv6hdr *ip6h;
		struct flowi6 fl6;

		ip6h = (const struct ipv6hdr *)network_hdr;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_TCP;
		fl6.saddr = ip6h->daddr;
		fl6.daddr = ip6h->saddr;
		fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
		fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
		security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
		dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
		if (IS_ERR(dst))
			goto free_sk;
		n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
	}
	if (!n)
		goto free_sk;

	ndev = n->dev;
	if (!ndev)
		goto free_dst;
	port_id = cxgb4_port_idx(ndev);

	csk = chtls_sock_create(cdev);
	if (!csk)
		goto free_dst;

	csk->l2t_entry = cxgb4_l2t_get(cdev->lldi->l2t, n, ndev, 0);
	if (!csk->l2t_entry)
		goto free_csk;

	newsk->sk_user_data = csk;
	newsk->sk_backlog_rcv = chtls_backlog_rcv;

	tp = tcp_sk(newsk);
	newinet = inet_sk(newsk);

	if (iph->version == 0x4) {
		newinet->inet_daddr = iph->saddr;
		newinet->inet_rcv_saddr = iph->daddr;
		newinet->inet_saddr = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct tcp6_sock *newtcp6sk = (struct tcp6_sock *)newsk;
		struct inet_request_sock *treq = inet_rsk(oreq);
		struct ipv6_pinfo *newnp = inet6_sk(newsk);
		struct ipv6_pinfo *np = inet6_sk(lsk);

		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
		newsk->sk_v6_daddr = treq->ir_v6_rmt_addr;
		newsk->sk_v6_rcv_saddr = treq->ir_v6_loc_addr;
		inet6_sk(newsk)->saddr = treq->ir_v6_loc_addr;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newsk->sk_bound_dev_if = treq->ir_iif;
		newinet->inet_opt = NULL;
		newinet->inet_daddr = LOOPBACK4_IPV6;
		newinet->inet_saddr = LOOPBACK4_IPV6;
#endif
	}

	oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	sk_setup_caps(newsk, dst);
	ctx = tls_get_ctx(lsk);
	newsk->sk_destruct = ctx->sk_destruct;
	csk->sk = newsk;
	csk->passive_reap_next = oreq;
	csk->tx_chan = cxgb4_port_chan(ndev);
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
	rxq_idx = port_id * step;
	csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
			port_id * step;
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
	csk_set_flag(csk, CSK_CONN_INLINE);
	bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */

	return newsk;
free_csk:
	chtls_sock_release(&csk->kref);
free_dst:
	dst_release(dst);
free_sk:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
free_oreq:
	chtls_reqsk_free(oreq);
	return NULL;
}

/*
 * Populate a TID_RELEASE WR.  The skb must already be properly sized.
 */
static void mk_tid_release(struct sk_buff *skb,
			   unsigned int chan, unsigned int tid)
{
	struct cpl_tid_release *req;
	unsigned int len;

	len = roundup(sizeof(struct cpl_tid_release), 16);
	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	INIT_TP_WR_CPL(req, CPL_TID_RELEASE, tid);
}

static int chtls_get_module(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!try_module_get(icsk->icsk_ulp_ops->owner))
		return -1;

	return 0;
}

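/*
 * Process a CPL_PASS_ACCEPT_REQ on the listening socket: parse the embedded
 * Ethernet/IP/TCP headers, allocate and fill a request_sock, create the
 * offloaded child socket and answer with a PASS_ACCEPT_RPL, or release the
 * TID if the request must be rejected.
 */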
| 1269 | static void chtls_pass_accept_request(struct sock *sk, |
| 1270 | struct sk_buff *skb) |
| 1271 | { |
| 1272 | struct cpl_t5_pass_accept_rpl *rpl; |
| 1273 | struct cpl_pass_accept_req *req; |
| 1274 | struct listen_ctx *listen_ctx; |
Atul Gupta | 0c3a16b | 2018-12-11 02:20:53 -0800 | [diff] [blame] | 1275 | struct vlan_ethhdr *vlan_eh; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1276 | struct request_sock *oreq; |
| 1277 | struct sk_buff *reply_skb; |
| 1278 | struct chtls_sock *csk; |
| 1279 | struct chtls_dev *cdev; |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1280 | struct ipv6hdr *ip6h; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1281 | struct tcphdr *tcph; |
| 1282 | struct sock *newsk; |
| 1283 | struct ethhdr *eh; |
| 1284 | struct iphdr *iph; |
| 1285 | void *network_hdr; |
| 1286 | unsigned int stid; |
| 1287 | unsigned int len; |
| 1288 | unsigned int tid; |
Atul Gupta | 0c3a16b | 2018-12-11 02:20:53 -0800 | [diff] [blame] | 1289 | bool th_ecn, ect; |
| 1290 | __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ |
| 1291 | u16 eth_hdr_len; |
| 1292 | bool ecn_ok; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1293 | |
| 1294 | req = cplhdr(skb) + RSS_HDR; |
| 1295 | tid = GET_TID(req); |
| 1296 | cdev = BLOG_SKB_CB(skb)->cdev; |
| 1297 | newsk = lookup_tid(cdev->tids, tid); |
| 1298 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); |
| 1299 | if (newsk) { |
| 1300 | pr_info("tid (%d) already in use\n", tid); |
| 1301 | return; |
| 1302 | } |
| 1303 | |
| 1304 | len = roundup(sizeof(*rpl), 16); |
| 1305 | reply_skb = alloc_skb(len, GFP_ATOMIC); |
| 1306 | if (!reply_skb) { |
| 1307 | cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family); |
| 1308 | kfree_skb(skb); |
| 1309 | return; |
| 1310 | } |
| 1311 | |
| 1312 | if (sk->sk_state != TCP_LISTEN) |
| 1313 | goto reject; |
| 1314 | |
| 1315 | if (inet_csk_reqsk_queue_is_full(sk)) |
| 1316 | goto reject; |
| 1317 | |
| 1318 | if (sk_acceptq_is_full(sk)) |
| 1319 | goto reject; |
| 1320 | |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1321 | |
| 1322 | eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len)); |
| 1323 | if (eth_hdr_len == ETH_HLEN) { |
| 1324 | eh = (struct ethhdr *)(req + 1); |
| 1325 | iph = (struct iphdr *)(eh + 1); |
| 1326 | ip6h = (struct ipv6hdr *)(eh + 1); |
| 1327 | network_hdr = (void *)(eh + 1); |
| 1328 | } else { |
| 1329 | vlan_eh = (struct vlan_ethhdr *)(req + 1); |
| 1330 | iph = (struct iphdr *)(vlan_eh + 1); |
| 1331 | ip6h = (struct ipv6hdr *)(vlan_eh + 1); |
| 1332 | network_hdr = (void *)(vlan_eh + 1); |
| 1333 | } |
| 1334 | |
| 1335 | if (iph->version == 0x4) { |
| 1336 | tcph = (struct tcphdr *)(iph + 1); |
| 1337 | skb_set_network_header(skb, (void *)iph - (void *)req); |
| 1338 | oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true); |
| 1339 | } else { |
| 1340 | tcph = (struct tcphdr *)(ip6h + 1); |
| 1341 | skb_set_network_header(skb, (void *)ip6h - (void *)req); |
| 1342 | oreq = inet_reqsk_alloc(&chtls_rsk_opsv6, sk, false); |
| 1343 | } |
| 1344 | |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1345 | if (!oreq) |
| 1346 | goto reject; |
| 1347 | |
| 1348 | oreq->rsk_rcv_wnd = 0; |
| 1349 | oreq->rsk_window_clamp = 0; |
| 1350 | oreq->cookie_ts = 0; |
| 1351 | oreq->mss = 0; |
| 1352 | oreq->ts_recent = 0; |
| 1353 | |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1354 | tcp_rsk(oreq)->tfo_listener = false; |
| 1355 | tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq); |
| 1356 | chtls_set_req_port(oreq, tcph->source, tcph->dest); |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1357 | if (iph->version == 0x4) { |
| 1358 | chtls_set_req_addr(oreq, iph->daddr, iph->saddr); |
| 1359 | ip_dsfield = ipv4_get_dsfield(iph); |
Vinay Kumar Yadav | a624a86 | 2020-06-05 01:53:44 +0530 | [diff] [blame] | 1360 | #if IS_ENABLED(CONFIG_IPV6) |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1361 | } else { |
| 1362 | inet_rsk(oreq)->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; |
| 1363 | inet_rsk(oreq)->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; |
| 1364 | ip_dsfield = ipv6_get_dsfield(ipv6_hdr(skb)); |
Vinay Kumar Yadav | a624a86 | 2020-06-05 01:53:44 +0530 | [diff] [blame] | 1365 | #endif |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1366 | } |
Atul Gupta | 0c3a16b | 2018-12-11 02:20:53 -0800 | [diff] [blame] | 1367 | if (req->tcpopt.wsf <= 14 && |
| 1368 | sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1369 | inet_rsk(oreq)->wscale_ok = 1; |
| 1370 | inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; |
| 1371 | } |
| 1372 | inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if; |
Atul Gupta | 0c3a16b | 2018-12-11 02:20:53 -0800 | [diff] [blame] | 1373 | th_ecn = tcph->ece && tcph->cwr; |
| 1374 | if (th_ecn) { |
| 1375 | ect = !INET_ECN_is_not_ect(ip_dsfield); |
| 1376 | ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn; |
| 1377 | if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk)) |
| 1378 | inet_rsk(oreq)->ecn_ok = 1; |
| 1379 | } |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1380 | |
| 1381 | newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev); |
| 1382 | if (!newsk) |
Vinay Kumar Yadav | 6abde0b | 2020-06-02 00:07:05 +0530 | [diff] [blame] | 1383 | goto free_oreq; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1384 | |
| 1385 | if (chtls_get_module(newsk)) |
| 1386 | goto reject; |
| 1387 | inet_csk_reqsk_queue_added(sk); |
| 1388 | reply_skb->sk = newsk; |
| 1389 | chtls_install_cpl_ops(newsk); |
| 1390 | cxgb4_insert_tid(cdev->tids, newsk, tid, newsk->sk_family); |
| 1391 | csk = rcu_dereference_sk_user_data(newsk); |
| 1392 | listen_ctx = (struct listen_ctx *)lookup_stid(cdev->tids, stid); |
| 1393 | csk->listen_ctx = listen_ctx; |
| 1394 | __skb_queue_tail(&listen_ctx->synq, (struct sk_buff *)&csk->synq); |
| 1395 | chtls_pass_accept_rpl(reply_skb, req, tid); |
| 1396 | kfree_skb(skb); |
| 1397 | return; |
| 1398 | |
| 1399 | free_oreq: |
| 1400 | chtls_reqsk_free(oreq); |
| 1401 | reject: |
| 1402 | mk_tid_release(reply_skb, 0, tid); |
| 1403 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
| 1404 | kfree_skb(skb); |
| 1405 | } |
| 1406 | |
| 1407 | /* |
| 1408 | * Handle a CPL_PASS_ACCEPT_REQ message. |
| 1409 | */ |
| 1410 | static int chtls_pass_accept_req(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1411 | { |
| 1412 | struct cpl_pass_accept_req *req = cplhdr(skb) + RSS_HDR; |
| 1413 | struct listen_ctx *ctx; |
| 1414 | unsigned int stid; |
| 1415 | unsigned int tid; |
| 1416 | struct sock *lsk; |
| 1417 | void *data; |
| 1418 | |
| 1419 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); |
| 1420 | tid = GET_TID(req); |
| 1421 | |
| 1422 | data = lookup_stid(cdev->tids, stid); |
| 1423 | if (!data) |
| 1424 | return 1; |
| 1425 | |
| 1426 | ctx = (struct listen_ctx *)data; |
| 1427 | lsk = ctx->lsk; |
| 1428 | |
Shahjada Abul Husain | 59437d7 | 2019-12-17 12:12:09 +0530 | [diff] [blame] | 1429 | if (unlikely(tid_out_of_range(cdev->tids, tid))) { |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1430 | pr_info("passive open TID %u too large\n", tid); |
| 1431 | return 1; |
| 1432 | } |
| 1433 | |
| 1434 | BLOG_SKB_CB(skb)->cdev = cdev; |
| 1435 | process_cpl_msg(chtls_pass_accept_request, lsk, skb); |
| 1436 | return 0; |
| 1437 | } |
| 1438 | |
| 1439 | /* |
| 1440 | * Completes some final bits of initialization for just established connections |
| 1441 | * and changes their state to TCP_ESTABLISHED. |
| 1442 | * |
| 1443 | * snd_isn here is the ISN after the SYN, i.e., the true ISN + 1. |
| 1444 | */ |
| 1445 | static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt) |
| 1446 | { |
| 1447 | struct tcp_sock *tp = tcp_sk(sk); |
| 1448 | |
| 1449 | tp->pushed_seq = snd_isn; |
| 1450 | tp->write_seq = snd_isn; |
| 1451 | tp->snd_nxt = snd_isn; |
| 1452 | tp->snd_una = snd_isn; |
Eric Dumazet | a904a06 | 2019-11-01 10:32:19 -0700 | [diff] [blame] | 1453 | inet_sk(sk)->inet_id = prandom_u32(); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1454 | assign_rxopt(sk, opt); |
| 1455 | |
| 1456 | if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10)) |
| 1457 | tp->rcv_wup -= tp->rcv_wnd - (RCV_BUFSIZ_M << 10); |
| 1458 | |
| 1459 | smp_mb(); |
| 1460 | tcp_set_state(sk, TCP_ESTABLISHED); |
| 1461 | } |
| 1462 | |
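/*
 * Send an ABORT_REQ to reset the connection.  The skb allocation is
 * atomic; if it fails the abort is simply not sent.
 */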
| 1463 | static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb) |
| 1464 | { |
| 1465 | struct sk_buff *abort_skb; |
| 1466 | |
| 1467 | abort_skb = alloc_skb(sizeof(struct cpl_abort_req), GFP_ATOMIC); |
| 1468 | if (abort_skb) |
| 1469 | chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb); |
| 1470 | } |
| 1471 | |
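/*
 * Child sockets that cannot be handed to accept() (for example because the
 * listener's accept queue is full) are chained on this list and torn down
 * from process context by the reap work below.
 */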
| 1472 | static struct sock *reap_list; |
| 1473 | static DEFINE_SPINLOCK(reap_list_lock); |
| 1474 | |
| 1475 | /* |
| 1476 | * Process the reap list. |
| 1477 | */ |
| 1478 | DECLARE_TASK_FUNC(process_reap_list, task_param) |
| 1479 | { |
| 1480 | spin_lock_bh(&reap_list_lock); |
| 1481 | while (reap_list) { |
| 1482 | struct sock *sk = reap_list; |
| 1483 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); |
| 1484 | |
| 1485 | reap_list = csk->passive_reap_next; |
| 1486 | csk->passive_reap_next = NULL; |
| 1487 | spin_unlock(&reap_list_lock); |
| 1488 | sock_hold(sk); |
| 1489 | |
| 1490 | bh_lock_sock(sk); |
| 1491 | chtls_abort_conn(sk, NULL); |
| 1492 | sock_orphan(sk); |
| 1493 | if (sk->sk_state == TCP_CLOSE) |
| 1494 | inet_csk_destroy_sock(sk); |
| 1495 | bh_unlock_sock(sk); |
| 1496 | sock_put(sk); |
| 1497 | spin_lock(&reap_list_lock); |
| 1498 | } |
| 1499 | spin_unlock_bh(&reap_list_lock); |
| 1500 | } |
| 1501 | |
| 1502 | static DECLARE_WORK(reap_task, process_reap_list); |
| 1503 | |
| 1504 | static void add_to_reap_list(struct sock *sk) |
| 1505 | { |
| 1506 | struct chtls_sock *csk = sk->sk_user_data; |
| 1507 | |
| 1508 | local_bh_disable(); |
| 1509 | bh_lock_sock(sk); |
| 1510 | release_tcp_port(sk); /* release the port immediately */ |
| 1511 | |
| 1512 | spin_lock(&reap_list_lock); |
| 1513 | csk->passive_reap_next = reap_list; |
| 1514 | reap_list = sk; |
| 1515 | if (!csk->passive_reap_next) |
| 1516 | schedule_work(&reap_task); |
| 1517 | spin_unlock(&reap_list_lock); |
| 1518 | bh_unlock_sock(sk); |
| 1519 | local_bh_enable(); |
| 1520 | } |
| 1521 | |
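/*
 * Move a child socket that has completed its three-way handshake from the
 * listener's SYN queue to its accept queue and wake the listener.  If the
 * accept queue is full, free the request and queue the child for reaping.
 */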
| 1522 | static void add_pass_open_to_parent(struct sock *child, struct sock *lsk, |
| 1523 | struct chtls_dev *cdev) |
| 1524 | { |
| 1525 | struct request_sock *oreq; |
| 1526 | struct chtls_sock *csk; |
| 1527 | |
| 1528 | if (lsk->sk_state != TCP_LISTEN) |
| 1529 | return; |
| 1530 | |
| 1531 | csk = child->sk_user_data; |
| 1532 | oreq = csk->passive_reap_next; |
| 1533 | csk->passive_reap_next = NULL; |
| 1534 | |
| 1535 | reqsk_queue_removed(&inet_csk(lsk)->icsk_accept_queue, oreq); |
| 1536 | __skb_unlink((struct sk_buff *)&csk->synq, &csk->listen_ctx->synq); |
| 1537 | |
| 1538 | if (sk_acceptq_is_full(lsk)) { |
| 1539 | chtls_reqsk_free(oreq); |
| 1540 | add_to_reap_list(child); |
| 1541 | } else { |
| 1542 | refcount_set(&oreq->rsk_refcnt, 1); |
| 1543 | inet_csk_reqsk_queue_add(lsk, oreq, child); |
| 1544 | lsk->sk_data_ready(lsk); |
| 1545 | } |
| 1546 | } |
| 1547 | |
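/*
 * Backlog variant of add_pass_open_to_parent(), run when the listening
 * socket was owned by user context at the time the establish message
 * arrived.
 */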
| 1548 | static void bl_add_pass_open_to_parent(struct sock *lsk, struct sk_buff *skb) |
| 1549 | { |
| 1550 | struct sock *child = skb->sk; |
| 1551 | |
| 1552 | skb->sk = NULL; |
| 1553 | add_pass_open_to_parent(child, lsk, BLOG_SKB_CB(skb)->cdev); |
| 1554 | kfree_skb(skb); |
| 1555 | } |
| 1556 | |
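/*
 * Handle a CPL_PASS_ESTABLISH message: the passive open has completed on
 * the adapter, so finish initializing the child socket, mark it
 * established and hand it to the parent listener, either directly or via
 * the listener's backlog.
 */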
| 1557 | static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1558 | { |
| 1559 | struct cpl_pass_establish *req = cplhdr(skb) + RSS_HDR; |
| 1560 | struct chtls_sock *csk; |
| 1561 | struct sock *lsk, *sk; |
| 1562 | unsigned int hwtid; |
| 1563 | |
| 1564 | hwtid = GET_TID(req); |
| 1565 | sk = lookup_tid(cdev->tids, hwtid); |
| 1566 | if (!sk) |
| 1567 | return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE); |
| 1568 | |
| 1569 | bh_lock_sock(sk); |
| 1570 | if (unlikely(sock_owned_by_user(sk))) { |
| 1571 | kfree_skb(skb); |
| 1572 | } else { |
| 1573 | unsigned int stid; |
| 1574 | void *data; |
| 1575 | |
| 1576 | csk = sk->sk_user_data; |
| 1577 | csk->wr_max_credits = 64; |
| 1578 | csk->wr_credits = 64; |
| 1579 | csk->wr_unacked = 0; |
| 1580 | make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); |
| 1581 | stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); |
| 1582 | sk->sk_state_change(sk); |
| 1583 | if (unlikely(sk->sk_socket)) |
| 1584 | sk_wake_async(sk, 0, POLL_OUT); |
| 1585 | |
| 1586 | data = lookup_stid(cdev->tids, stid); |
| 1587 | lsk = ((struct listen_ctx *)data)->lsk; |
| 1588 | |
| 1589 | bh_lock_sock(lsk); |
| 1590 | if (unlikely(skb_queue_empty(&csk->listen_ctx->synq))) { |
| 1591 | /* removed from synq */ |
| 1592 | bh_unlock_sock(lsk); |
| 1593 | kfree_skb(skb); |
| 1594 | goto unlock; |
| 1595 | } |
| 1596 | |
| 1597 | if (likely(!sock_owned_by_user(lsk))) { |
| 1598 | kfree_skb(skb); |
| 1599 | add_pass_open_to_parent(sk, lsk, cdev); |
| 1600 | } else { |
| 1601 | skb->sk = sk; |
| 1602 | BLOG_SKB_CB(skb)->cdev = cdev; |
| 1603 | BLOG_SKB_CB(skb)->backlog_rcv = |
| 1604 | bl_add_pass_open_to_parent; |
| 1605 | __sk_add_backlog(lsk, skb); |
| 1606 | } |
| 1607 | bh_unlock_sock(lsk); |
| 1608 | } |
| 1609 | unlock: |
| 1610 | bh_unlock_sock(sk); |
| 1611 | return 0; |
| 1612 | } |
| 1613 | |
| 1614 | /* |
| 1615 | * Handle receipt of an urgent pointer. |
| 1616 | */ |
| 1617 | static void handle_urg_ptr(struct sock *sk, u32 urg_seq) |
| 1618 | { |
| 1619 | struct tcp_sock *tp = tcp_sk(sk); |
| 1620 | |
| 1621 | urg_seq--; |
| 1622 | if (tp->urg_data && !after(urg_seq, tp->urg_seq)) |
| 1623 | return; /* duplicate pointer */ |
| 1624 | |
| 1625 | sk_send_sigurg(sk); |
| 1626 | if (tp->urg_seq == tp->copied_seq && tp->urg_data && |
| 1627 | !sock_flag(sk, SOCK_URGINLINE) && |
| 1628 | tp->copied_seq != tp->rcv_nxt) { |
| 1629 | struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); |
| 1630 | |
| 1631 | tp->copied_seq++; |
| 1632 | if (skb && tp->copied_seq - ULP_SKB_CB(skb)->seq >= skb->len) |
| 1633 | chtls_free_skb(sk, skb); |
| 1634 | } |
| 1635 | |
| 1636 | tp->urg_data = TCP_URG_NOTYET; |
| 1637 | tp->urg_seq = urg_seq; |
| 1638 | } |
| 1639 | |
| 1640 | static void check_sk_callbacks(struct chtls_sock *csk) |
| 1641 | { |
| 1642 | struct sock *sk = csk->sk; |
| 1643 | |
| 1644 | if (unlikely(sk->sk_user_data && |
| 1645 | !csk_flag_nochk(csk, CSK_CALLBACKS_CHKD))) |
| 1646 | csk_set_flag(csk, CSK_CALLBACKS_CHKD); |
| 1647 | } |
| 1648 | |
| 1649 | /* |
| 1650 | * Handles Rx data that arrives in a state where the socket isn't accepting |
| 1651 | * new data. |
| 1652 | */ |
| 1653 | static void handle_excess_rx(struct sock *sk, struct sk_buff *skb) |
| 1654 | { |
| 1655 | if (!csk_flag(sk, CSK_ABORT_SHUTDOWN)) |
| 1656 | chtls_abort_conn(sk, skb); |
| 1657 | |
| 1658 | kfree_skb(skb); |
| 1659 | } |
| 1660 | |
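/*
 * Process a CPL_RX_DATA message carrying plain (non-TLS) payload.  Trim
 * the skb to the payload, advance rcv_nxt and queue the data on
 * sk_receive_queue.
 */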
| 1661 | static void chtls_recv_data(struct sock *sk, struct sk_buff *skb) |
| 1662 | { |
| 1663 | struct cpl_rx_data *hdr = cplhdr(skb) + RSS_HDR; |
| 1664 | struct chtls_sock *csk; |
| 1665 | struct tcp_sock *tp; |
| 1666 | |
| 1667 | csk = rcu_dereference_sk_user_data(sk); |
| 1668 | tp = tcp_sk(sk); |
| 1669 | |
| 1670 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { |
| 1671 | handle_excess_rx(sk, skb); |
| 1672 | return; |
| 1673 | } |
| 1674 | |
| 1675 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); |
| 1676 | ULP_SKB_CB(skb)->psh = hdr->psh; |
| 1677 | skb_ulp_mode(skb) = ULP_MODE_NONE; |
| 1678 | |
| 1679 | skb_reset_transport_header(skb); |
| 1680 | __skb_pull(skb, sizeof(*hdr) + RSS_HDR); |
| 1681 | if (!skb->data_len) |
| 1682 | __skb_trim(skb, ntohs(hdr->len)); |
| 1683 | |
| 1684 | if (unlikely(hdr->urg)) |
| 1685 | handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg)); |
| 1686 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && |
| 1687 | tp->urg_seq - tp->rcv_nxt < skb->len)) |
| 1688 | tp->urg_data = TCP_URG_VALID | |
| 1689 | skb->data[tp->urg_seq - tp->rcv_nxt]; |
| 1690 | |
| 1691 | if (unlikely(hdr->dack_mode != csk->delack_mode)) { |
| 1692 | csk->delack_mode = hdr->dack_mode; |
| 1693 | csk->delack_seq = tp->rcv_nxt; |
| 1694 | } |
| 1695 | |
| 1696 | tcp_hdr(skb)->fin = 0; |
| 1697 | tp->rcv_nxt += skb->len; |
| 1698 | |
| 1699 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1700 | |
| 1701 | if (!sock_flag(sk, SOCK_DEAD)) { |
| 1702 | check_sk_callbacks(csk); |
| 1703 | sk->sk_data_ready(sk); |
| 1704 | } |
| 1705 | } |
| 1706 | |
| 1707 | static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1708 | { |
| 1709 | struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR; |
| 1710 | unsigned int hwtid = GET_TID(req); |
| 1711 | struct sock *sk; |
| 1712 | |
| 1713 | sk = lookup_tid(cdev->tids, hwtid); |
Gustavo A. R. Silva | 3d8ccf9 | 2018-04-03 15:09:12 -0500 | [diff] [blame] | 1714 | if (unlikely(!sk)) { |
| 1715 | pr_err("can't find conn. for hwtid %u.\n", hwtid); |
| 1716 | return -EINVAL; |
| 1717 | } |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1718 | skb_dst_set(skb, NULL); |
| 1719 | process_cpl_msg(chtls_recv_data, sk, skb); |
| 1720 | return 0; |
| 1721 | } |
| 1722 | |
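/*
 * Process a CPL_TLS_DATA message carrying TLS record payload.  The payload
 * is parked on the per-connection TLS receive queue until the matching
 * CPL_RX_TLS_CMP completion arrives.
 */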
| 1723 | static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb) |
| 1724 | { |
| 1725 | struct cpl_tls_data *hdr = cplhdr(skb); |
| 1726 | struct chtls_sock *csk; |
| 1727 | struct chtls_hws *tlsk; |
| 1728 | struct tcp_sock *tp; |
| 1729 | |
| 1730 | csk = rcu_dereference_sk_user_data(sk); |
| 1731 | tlsk = &csk->tlshws; |
| 1732 | tp = tcp_sk(sk); |
| 1733 | |
| 1734 | if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) { |
| 1735 | handle_excess_rx(sk, skb); |
| 1736 | return; |
| 1737 | } |
| 1738 | |
| 1739 | ULP_SKB_CB(skb)->seq = ntohl(hdr->seq); |
| 1740 | ULP_SKB_CB(skb)->flags = 0; |
| 1741 | skb_ulp_mode(skb) = ULP_MODE_TLS; |
| 1742 | |
| 1743 | skb_reset_transport_header(skb); |
| 1744 | __skb_pull(skb, sizeof(*hdr)); |
| 1745 | if (!skb->data_len) |
| 1746 | __skb_trim(skb, |
| 1747 | CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd))); |
| 1748 | |
| 1749 | if (unlikely(tp->urg_data == TCP_URG_NOTYET && tp->urg_seq - |
| 1750 | tp->rcv_nxt < skb->len)) |
| 1751 | tp->urg_data = TCP_URG_VALID | |
| 1752 | skb->data[tp->urg_seq - tp->rcv_nxt]; |
| 1753 | |
| 1754 | tcp_hdr(skb)->fin = 0; |
| 1755 | tlsk->pldlen = CPL_TLS_DATA_LENGTH_G(ntohl(hdr->length_pkd)); |
| 1756 | __skb_queue_tail(&tlsk->sk_recv_queue, skb); |
| 1757 | } |
| 1758 | |
| 1759 | static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1760 | { |
| 1761 | struct cpl_tls_data *req = cplhdr(skb); |
| 1762 | unsigned int hwtid = GET_TID(req); |
| 1763 | struct sock *sk; |
| 1764 | |
| 1765 | sk = lookup_tid(cdev->tids, hwtid); |
Gustavo A. R. Silva | 3d8ccf9 | 2018-04-03 15:09:12 -0500 | [diff] [blame] | 1766 | if (unlikely(!sk)) { |
| 1767 | pr_err("can't find conn. for hwtid %u.\n", hwtid); |
| 1768 | return -EINVAL; |
| 1769 | } |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1770 | skb_dst_set(skb, NULL); |
| 1771 | process_cpl_msg(chtls_recv_pdu, sk, skb); |
| 1772 | return 0; |
| 1773 | } |
| 1774 | |
| 1775 | static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen) |
| 1776 | { |
| 1777 | struct tlsrx_cmp_hdr *tls_cmp_hdr = cplhdr(skb); |
| 1778 | |
| 1779 | skb->hdr_len = ntohs((__force __be16)tls_cmp_hdr->length); |
| 1780 | tls_cmp_hdr->length = ntohs((__force __be16)nlen); |
| 1781 | } |
| 1782 | |
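/*
 * Process a CPL_RX_TLS_CMP message: the completion for a received TLS
 * record.  Flag header errors, pair the completion with the payload queued
 * by chtls_recv_pdu() and deliver both to sk_receive_queue.
 */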
| 1783 | static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) |
| 1784 | { |
Atul Gupta | 17a7d24a | 2018-05-14 16:41:38 +0530 | [diff] [blame] | 1785 | struct tlsrx_cmp_hdr *tls_hdr_pkt; |
| 1786 | struct cpl_rx_tls_cmp *cmp_cpl; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1787 | struct sk_buff *skb_rec; |
| 1788 | struct chtls_sock *csk; |
| 1789 | struct chtls_hws *tlsk; |
| 1790 | struct tcp_sock *tp; |
| 1791 | |
Atul Gupta | 17a7d24a | 2018-05-14 16:41:38 +0530 | [diff] [blame] | 1792 | cmp_cpl = cplhdr(skb); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1793 | csk = rcu_dereference_sk_user_data(sk); |
| 1794 | tlsk = &csk->tlshws; |
| 1795 | tp = tcp_sk(sk); |
| 1796 | |
| 1797 | ULP_SKB_CB(skb)->seq = ntohl(cmp_cpl->seq); |
| 1798 | ULP_SKB_CB(skb)->flags = 0; |
| 1799 | |
| 1800 | skb_reset_transport_header(skb); |
| 1801 | __skb_pull(skb, sizeof(*cmp_cpl)); |
Atul Gupta | 17a7d24a | 2018-05-14 16:41:38 +0530 | [diff] [blame] | 1802 | tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data; |
| 1803 | if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M) |
| 1804 | tls_hdr_pkt->type = CONTENT_TYPE_ERROR; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1805 | if (!skb->data_len) |
Atul Gupta | 17a7d24a | 2018-05-14 16:41:38 +0530 | [diff] [blame] | 1806 | __skb_trim(skb, TLS_HEADER_LENGTH); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1807 | |
| 1808 | tp->rcv_nxt += |
| 1809 | CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length)); |
| 1810 | |
Atul Gupta | 17a7d24a | 2018-05-14 16:41:38 +0530 | [diff] [blame] | 1811 | ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1812 | skb_rec = __skb_dequeue(&tlsk->sk_recv_queue); |
| 1813 | if (!skb_rec) { |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1814 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1815 | } else { |
| 1816 | chtls_set_hdrlen(skb, tlsk->pldlen); |
| 1817 | tlsk->pldlen = 0; |
| 1818 | __skb_queue_tail(&sk->sk_receive_queue, skb); |
| 1819 | __skb_queue_tail(&sk->sk_receive_queue, skb_rec); |
| 1820 | } |
| 1821 | |
| 1822 | if (!sock_flag(sk, SOCK_DEAD)) { |
| 1823 | check_sk_callbacks(csk); |
| 1824 | sk->sk_data_ready(sk); |
| 1825 | } |
| 1826 | } |
| 1827 | |
| 1828 | static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1829 | { |
| 1830 | struct cpl_rx_tls_cmp *req = cplhdr(skb); |
| 1831 | unsigned int hwtid = GET_TID(req); |
| 1832 | struct sock *sk; |
| 1833 | |
| 1834 | sk = lookup_tid(cdev->tids, hwtid); |
Gustavo A. R. Silva | 3d8ccf9 | 2018-04-03 15:09:12 -0500 | [diff] [blame] | 1835 | if (unlikely(!sk)) { |
| 1836 | pr_err("can't find conn. for hwtid %u.\n", hwtid); |
| 1837 | return -EINVAL; |
| 1838 | } |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1839 | skb_dst_set(skb, NULL); |
| 1840 | process_cpl_msg(chtls_rx_hdr, sk, skb); |
| 1841 | |
| 1842 | return 0; |
| 1843 | } |
| 1844 | |
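/* Move the connection to TIME_WAIT after the FIN exchange completes. */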
| 1845 | static void chtls_timewait(struct sock *sk) |
| 1846 | { |
| 1847 | struct tcp_sock *tp = tcp_sk(sk); |
| 1848 | |
| 1849 | tp->rcv_nxt++; |
Arnd Bergmann | cca9bab | 2018-07-11 12:16:12 +0200 | [diff] [blame] | 1850 | tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1851 | tp->srtt_us = 0; |
| 1852 | tcp_time_wait(sk, TCP_TIME_WAIT, 0); |
| 1853 | } |
| 1854 | |
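/*
 * Handle a CPL_PEER_CLOSE message: the peer has sent a FIN.  Advance the
 * connection state and wake up anyone waiting on the socket.
 */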
| 1855 | static void chtls_peer_close(struct sock *sk, struct sk_buff *skb) |
| 1856 | { |
| 1857 | struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); |
| 1858 | |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 1859 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) |
| 1860 | goto out; |
| 1861 | |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1862 | sk->sk_shutdown |= RCV_SHUTDOWN; |
| 1863 | sock_set_flag(sk, SOCK_DONE); |
| 1864 | |
| 1865 | switch (sk->sk_state) { |
| 1866 | case TCP_SYN_RECV: |
| 1867 | case TCP_ESTABLISHED: |
| 1868 | tcp_set_state(sk, TCP_CLOSE_WAIT); |
| 1869 | break; |
| 1870 | case TCP_FIN_WAIT1: |
| 1871 | tcp_set_state(sk, TCP_CLOSING); |
| 1872 | break; |
| 1873 | case TCP_FIN_WAIT2: |
| 1874 | chtls_release_resources(sk); |
| 1875 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) |
| 1876 | chtls_conn_done(sk); |
| 1877 | else |
| 1878 | chtls_timewait(sk); |
| 1879 | break; |
| 1880 | default: |
| 1881 | pr_info("cpl_peer_close in bad state %d\n", sk->sk_state); |
| 1882 | } |
| 1883 | |
| 1884 | if (!sock_flag(sk, SOCK_DEAD)) { |
| 1885 | sk->sk_state_change(sk); |
| 1886 | /* Do not send POLL_HUP for half-duplex close. */
| 1887 | |
| 1888 | if ((sk->sk_shutdown & SEND_SHUTDOWN) || |
| 1889 | sk->sk_state == TCP_CLOSE) |
| 1890 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); |
| 1891 | else |
| 1892 | sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); |
| 1893 | } |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 1894 | out: |
Vinay Kumar Yadav | 93e23eb | 2019-12-19 16:21:48 +0530 | [diff] [blame] | 1895 | kfree_skb(skb); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1896 | } |
| 1897 | |
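/*
 * Handle a CPL_CLOSE_CON_RPL message: the adapter's reply to our close
 * request.  Record the send sequence state and advance the close state
 * machine.
 */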
| 1898 | static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb) |
| 1899 | { |
| 1900 | struct cpl_close_con_rpl *rpl = cplhdr(skb) + RSS_HDR; |
| 1901 | struct chtls_sock *csk; |
| 1902 | struct tcp_sock *tp; |
| 1903 | |
| 1904 | csk = rcu_dereference_sk_user_data(sk); |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 1905 | |
| 1906 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) |
| 1907 | goto out; |
| 1908 | |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1909 | tp = tcp_sk(sk); |
| 1910 | |
| 1911 | tp->snd_una = ntohl(rpl->snd_nxt) - 1; /* exclude FIN */ |
| 1912 | |
| 1913 | switch (sk->sk_state) { |
| 1914 | case TCP_CLOSING: |
| 1915 | chtls_release_resources(sk); |
| 1916 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) |
| 1917 | chtls_conn_done(sk); |
| 1918 | else |
| 1919 | chtls_timewait(sk); |
| 1920 | break; |
| 1921 | case TCP_LAST_ACK: |
| 1922 | chtls_release_resources(sk); |
| 1923 | chtls_conn_done(sk); |
| 1924 | break; |
| 1925 | case TCP_FIN_WAIT1: |
| 1926 | tcp_set_state(sk, TCP_FIN_WAIT2); |
| 1927 | sk->sk_shutdown |= SEND_SHUTDOWN; |
| 1928 | |
| 1929 | if (!sock_flag(sk, SOCK_DEAD)) |
| 1930 | sk->sk_state_change(sk); |
| 1931 | else if (tcp_sk(sk)->linger2 < 0 && |
| 1932 | !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN)) |
| 1933 | chtls_abort_conn(sk, skb); |
| 1934 | break; |
| 1935 | default: |
| 1936 | pr_info("close_con_rpl in bad state %d\n", sk->sk_state); |
| 1937 | } |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 1938 | out: |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1939 | kfree_skb(skb); |
| 1940 | } |
| 1941 | |
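/*
 * Return an skb sized to @len for building a CPL reply: reuse the incoming
 * skb when it is linear and not cloned, otherwise allocate a fresh one.
 */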
| 1942 | static struct sk_buff *get_cpl_skb(struct sk_buff *skb, |
| 1943 | size_t len, gfp_t gfp) |
| 1944 | { |
| 1945 | if (likely(!skb_is_nonlinear(skb) && !skb_cloned(skb))) { |
| 1946 | WARN_ONCE(skb->len < len, "skb alloc error"); |
| 1947 | __skb_trim(skb, len); |
| 1948 | skb_get(skb); |
| 1949 | } else { |
| 1950 | skb = alloc_skb(len, gfp); |
| 1951 | if (skb) |
| 1952 | __skb_put(skb, len); |
| 1953 | } |
| 1954 | return skb; |
| 1955 | } |
| 1956 | |
| 1957 | static void set_abort_rpl_wr(struct sk_buff *skb, unsigned int tid, |
| 1958 | int cmd) |
| 1959 | { |
| 1960 | struct cpl_abort_rpl *rpl = cplhdr(skb); |
| 1961 | |
| 1962 | INIT_TP_WR_CPL(rpl, CPL_ABORT_RPL, tid); |
| 1963 | rpl->cmd = cmd; |
| 1964 | } |
| 1965 | |
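/*
 * Send an ABORT_RPL from process context.  The reply skb is allocated with
 * __GFP_NOFAIL, so this path cannot fail; it is used when the reply could
 * not be allocated in the receive path.
 */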
| 1966 | static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) |
| 1967 | { |
| 1968 | struct cpl_abort_req_rss *req = cplhdr(skb); |
| 1969 | struct sk_buff *reply_skb; |
| 1970 | |
| 1971 | reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), |
| 1972 | GFP_KERNEL | __GFP_NOFAIL); |
| 1973 | __skb_put(reply_skb, sizeof(struct cpl_abort_rpl)); |
| 1974 | set_abort_rpl_wr(reply_skb, GET_TID(req), |
| 1975 | (req->status & CPL_ABORT_NO_RST)); |
| 1976 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, req->status >> 1); |
| 1977 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
| 1978 | kfree_skb(skb); |
| 1979 | } |
| 1980 | |
Vinay Kumar Yadav | c9f0d33 | 2020-01-14 17:58:47 +0530 | [diff] [blame] | 1981 | /* |
| 1982 | * Add an skb to the deferred skb queue for processing from process context. |
| 1983 | */ |
| 1984 | static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev, |
| 1985 | defer_handler_t handler) |
| 1986 | { |
| 1987 | DEFERRED_SKB_CB(skb)->handler = handler; |
| 1988 | spin_lock_bh(&cdev->deferq.lock); |
| 1989 | __skb_queue_tail(&cdev->deferq, skb); |
| 1990 | if (skb_queue_len(&cdev->deferq) == 1) |
| 1991 | schedule_work(&cdev->deferq_task); |
| 1992 | spin_unlock_bh(&cdev->deferq.lock); |
| 1993 | } |
| 1994 | |
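/*
 * Build and send an ABORT_RPL for @sk.  If the reply cannot be allocated
 * here, defer it to process context via t4_defer_reply().
 */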
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 1995 | static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, |
| 1996 | struct chtls_dev *cdev, int status, int queue) |
| 1997 | { |
| 1998 | struct cpl_abort_req_rss *req = cplhdr(skb); |
| 1999 | struct sk_buff *reply_skb; |
| 2000 | struct chtls_sock *csk; |
| 2001 | |
| 2002 | csk = rcu_dereference_sk_user_data(sk); |
| 2003 | |
| 2004 | reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), |
| 2005 | GFP_KERNEL); |
| 2006 | |
| 2007 | if (!reply_skb) { |
| 2008 | req->status = (queue << 1); |
Vinay Kumar Yadav | c9f0d33 | 2020-01-14 17:58:47 +0530 | [diff] [blame] | 2009 | t4_defer_reply(skb, cdev, send_defer_abort_rpl); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2010 | return; |
| 2011 | } |
| 2012 | |
| 2013 | set_abort_rpl_wr(reply_skb, GET_TID(req), status); |
| 2014 | kfree_skb(skb); |
| 2015 | |
| 2016 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); |
| 2017 | if (csk_conn_inline(csk)) { |
| 2018 | struct l2t_entry *e = csk->l2t_entry; |
| 2019 | |
| 2020 | if (e && sk->sk_state != TCP_SYN_RECV) { |
| 2021 | cxgb4_l2t_send(csk->egress_dev, reply_skb, e); |
| 2022 | return; |
| 2023 | } |
| 2024 | } |
| 2025 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
| 2026 | } |
| 2027 | |
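/*
 * As send_abort_rpl(), but reuses the request skb for the reply when
 * possible and encodes the status in the deferred request.
 */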
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2028 | static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, |
| 2029 | struct chtls_dev *cdev, |
| 2030 | int status, int queue) |
| 2031 | { |
| 2032 | struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR; |
| 2033 | struct sk_buff *reply_skb; |
| 2034 | struct chtls_sock *csk; |
| 2035 | unsigned int tid; |
| 2036 | |
| 2037 | csk = rcu_dereference_sk_user_data(sk); |
| 2038 | tid = GET_TID(req); |
| 2039 | |
| 2040 | reply_skb = get_cpl_skb(skb, sizeof(struct cpl_abort_rpl), gfp_any()); |
| 2041 | if (!reply_skb) { |
| 2042 | req->status = (queue << 1) | status; |
| 2043 | t4_defer_reply(skb, cdev, send_defer_abort_rpl); |
| 2044 | return; |
| 2045 | } |
| 2046 | |
| 2047 | set_abort_rpl_wr(reply_skb, tid, status); |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 2048 | kfree_skb(skb); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2049 | set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); |
| 2050 | if (csk_conn_inline(csk)) { |
| 2051 | struct l2t_entry *e = csk->l2t_entry; |
| 2052 | |
| 2053 | if (e && sk->sk_state != TCP_SYN_RECV) { |
| 2054 | cxgb4_l2t_send(csk->egress_dev, reply_skb, e); |
| 2055 | return; |
| 2056 | } |
| 2057 | } |
| 2058 | cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2059 | } |
| 2060 | |
| 2061 | /* |
| 2062 | * This is run from a listener's backlog to abort a child connection in |
| 2063 | * SYN_RCV state (i.e., one on the listener's SYN queue). |
| 2064 | */ |
| 2065 | static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) |
| 2066 | { |
| 2067 | struct chtls_sock *csk; |
| 2068 | struct sock *child; |
| 2069 | int queue; |
| 2070 | |
| 2071 | child = skb->sk; |
| 2072 | csk = rcu_dereference_sk_user_data(child); |
| 2073 | queue = csk->txq_idx; |
| 2074 | |
| 2075 | skb->sk = NULL; |
| 2076 | do_abort_syn_rcv(child, lsk); |
| 2077 | send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, |
| 2078 | CPL_ABORT_NO_RST, queue); |
| 2079 | } |
| 2080 | |
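/*
 * Abort a connection that is still in SYN_RECV: locate the parent listener
 * from the stid saved in the request socket and either tear the child down
 * right away or defer the work to the listener's backlog.
 */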
| 2081 | static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) |
| 2082 | { |
| 2083 | const struct request_sock *oreq; |
| 2084 | struct listen_ctx *listen_ctx; |
| 2085 | struct chtls_sock *csk; |
| 2086 | struct chtls_dev *cdev; |
| 2087 | struct sock *psk; |
| 2088 | void *ctx; |
| 2089 | |
| 2090 | csk = sk->sk_user_data; |
| 2091 | oreq = csk->passive_reap_next; |
| 2092 | cdev = csk->cdev; |
| 2093 | |
| 2094 | if (!oreq) |
| 2095 | return -1; |
| 2096 | |
| 2097 | ctx = lookup_stid(cdev->tids, oreq->ts_recent); |
| 2098 | if (!ctx) |
| 2099 | return -1; |
| 2100 | |
| 2101 | listen_ctx = (struct listen_ctx *)ctx; |
| 2102 | psk = listen_ctx->lsk; |
| 2103 | |
| 2104 | bh_lock_sock(psk); |
| 2105 | if (!sock_owned_by_user(psk)) { |
| 2106 | int queue = csk->txq_idx; |
| 2107 | |
| 2108 | do_abort_syn_rcv(sk, psk); |
| 2109 | send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); |
| 2110 | } else { |
| 2111 | skb->sk = sk; |
| 2112 | BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; |
| 2113 | __sk_add_backlog(psk, skb); |
| 2114 | } |
| 2115 | bh_unlock_sock(psk); |
| 2116 | return 0; |
| 2117 | } |
| 2118 | |
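/*
 * Handle a CPL_ABORT_REQ_RSS message: the connection is being aborted.
 * Negative advice is ignored; otherwise report the error, release the
 * connection's resources and send back an ABORT_RPL.
 */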
| 2119 | static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) |
| 2120 | { |
| 2121 | const struct cpl_abort_req_rss *req = cplhdr(skb) + RSS_HDR; |
| 2122 | struct chtls_sock *csk = sk->sk_user_data; |
| 2123 | int rst_status = CPL_ABORT_NO_RST; |
| 2124 | int queue = csk->txq_idx; |
| 2125 | |
| 2126 | if (is_neg_adv(req->status)) { |
| 2127 | if (sk->sk_state == TCP_SYN_RECV) |
| 2128 | chtls_set_tcb_tflag(sk, 0, 0); |
| 2129 | |
| 2130 | kfree_skb(skb); |
| 2131 | return; |
| 2132 | } |
| 2133 | |
| 2134 | csk_reset_flag(csk, CSK_ABORT_REQ_RCVD); |
| 2135 | |
| 2136 | if (!csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN) && |
| 2137 | !csk_flag_nochk(csk, CSK_TX_DATA_SENT)) { |
| 2138 | struct tcp_sock *tp = tcp_sk(sk); |
| 2139 | |
| 2140 | if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0) |
| 2141 | WARN_ONCE(1, "send_tx_flowc error"); |
| 2142 | csk_set_flag(csk, CSK_TX_DATA_SENT); |
| 2143 | } |
| 2144 | |
| 2145 | csk_set_flag(csk, CSK_ABORT_SHUTDOWN); |
| 2146 | |
| 2147 | if (!csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) { |
| 2148 | sk->sk_err = ETIMEDOUT; |
| 2149 | |
| 2150 | if (!sock_flag(sk, SOCK_DEAD)) |
| 2151 | sk->sk_error_report(sk); |
| 2152 | |
| 2153 | if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) |
| 2154 | return; |
| 2155 | |
| 2156 | chtls_release_resources(sk); |
| 2157 | chtls_conn_done(sk); |
| 2158 | } |
| 2159 | |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 2160 | chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev, |
| 2161 | rst_status, queue); |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2162 | } |
| 2163 | |
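/*
 * Handle a CPL_ABORT_RPL_RSS message: the adapter has acknowledged an
 * abort we requested, so release the connection unless an abort request
 * from the peer is also pending.
 */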
| 2164 | static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb) |
| 2165 | { |
| 2166 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb) + RSS_HDR; |
| 2167 | struct chtls_sock *csk; |
| 2168 | struct chtls_dev *cdev; |
| 2169 | |
| 2170 | csk = rcu_dereference_sk_user_data(sk); |
| 2171 | cdev = csk->cdev; |
| 2172 | |
| 2173 | if (csk_flag_nochk(csk, CSK_ABORT_RPL_PENDING)) { |
| 2174 | csk_reset_flag(csk, CSK_ABORT_RPL_PENDING); |
| 2175 | if (!csk_flag_nochk(csk, CSK_ABORT_REQ_RCVD)) { |
| 2176 | if (sk->sk_state == TCP_SYN_SENT) { |
| 2177 | cxgb4_remove_tid(cdev->tids, |
| 2178 | csk->port_id, |
| 2179 | GET_TID(rpl), |
| 2180 | sk->sk_family); |
| 2181 | sock_put(sk); |
| 2182 | } |
| 2183 | chtls_release_resources(sk); |
| 2184 | chtls_conn_done(sk); |
| 2185 | } |
| 2186 | } |
| 2187 | kfree_skb(skb); |
| 2188 | } |
| 2189 | |
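/*
 * Demultiplex connection-lifetime CPL messages (peer close, close reply,
 * abort request and abort reply) to their handlers in socket context.
 */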
| 2190 | static int chtls_conn_cpl(struct chtls_dev *cdev, struct sk_buff *skb) |
| 2191 | { |
| 2192 | struct cpl_peer_close *req = cplhdr(skb) + RSS_HDR; |
| 2193 | void (*fn)(struct sock *sk, struct sk_buff *skb); |
| 2194 | unsigned int hwtid = GET_TID(req); |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 2195 | struct chtls_sock *csk; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2196 | struct sock *sk; |
| 2197 | u8 opcode; |
| 2198 | |
| 2199 | opcode = ((const struct rss_header *)cplhdr(skb))->opcode; |
| 2200 | |
| 2201 | sk = lookup_tid(cdev->tids, hwtid); |
| 2202 | if (!sk) |
| 2203 | goto rel_skb; |
| 2204 | |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 2205 | csk = sk->sk_user_data; |
| 2206 | |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2207 | switch (opcode) { |
| 2208 | case CPL_PEER_CLOSE: |
| 2209 | fn = chtls_peer_close; |
| 2210 | break; |
| 2211 | case CPL_CLOSE_CON_RPL: |
| 2212 | fn = chtls_close_con_rpl; |
| 2213 | break; |
| 2214 | case CPL_ABORT_REQ_RSS: |
Rohit Maheshwari | 3a0a978 | 2020-03-30 22:25:55 +0530 | [diff] [blame] | 2215 | /* |
| 2216 | * Save the offload device in the skb; we may process this
| 2217 | * message after the socket has closed.
| 2218 | */ |
| 2219 | BLOG_SKB_CB(skb)->cdev = csk->cdev; |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2220 | fn = chtls_abort_req_rss; |
| 2221 | break; |
| 2222 | case CPL_ABORT_RPL_RSS: |
| 2223 | fn = chtls_abort_rpl_rss; |
| 2224 | break; |
| 2225 | default: |
| 2226 | goto rel_skb; |
| 2227 | } |
| 2228 | |
| 2229 | process_cpl_msg(fn, sk, skb); |
| 2230 | return 0; |
| 2231 | |
| 2232 | rel_skb: |
| 2233 | kfree_skb(skb); |
| 2234 | return 0; |
| 2235 | } |
| 2236 | |
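/*
 * Process a CPL_FW4_ACK message: return TX credits, free write requests
 * that have been fully acknowledged, advance snd_una and push any pending
 * frames from the transmit queue.
 */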
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2237 | static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb) |
| 2238 | { |
| 2239 | struct cpl_fw4_ack *hdr = cplhdr(skb) + RSS_HDR; |
| 2240 | struct chtls_sock *csk = sk->sk_user_data; |
| 2241 | struct tcp_sock *tp = tcp_sk(sk); |
| 2242 | u32 credits = hdr->credits; |
| 2243 | u32 snd_una; |
| 2244 | |
| 2245 | snd_una = ntohl(hdr->snd_una); |
| 2246 | csk->wr_credits += credits; |
| 2247 | |
| 2248 | if (csk->wr_unacked > csk->wr_max_credits - csk->wr_credits) |
| 2249 | csk->wr_unacked = csk->wr_max_credits - csk->wr_credits; |
| 2250 | |
| 2251 | while (credits) { |
| 2252 | struct sk_buff *pskb = csk->wr_skb_head; |
| 2253 | u32 csum; |
| 2254 | |
| 2255 | if (unlikely(!pskb)) { |
| 2256 | if (csk->wr_nondata) |
| 2257 | csk->wr_nondata -= credits; |
| 2258 | break; |
| 2259 | } |
| 2260 | csum = (__force u32)pskb->csum; |
| 2261 | if (unlikely(credits < csum)) { |
| 2262 | pskb->csum = (__force __wsum)(csum - credits); |
| 2263 | break; |
| 2264 | } |
| 2265 | dequeue_wr(sk); |
| 2266 | credits -= csum; |
| 2267 | kfree_skb(pskb); |
| 2268 | } |
| 2269 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) { |
| 2270 | if (unlikely(before(snd_una, tp->snd_una))) { |
| 2271 | kfree_skb(skb); |
| 2272 | return; |
| 2273 | } |
| 2274 | |
| 2275 | if (tp->snd_una != snd_una) { |
| 2276 | tp->snd_una = snd_una; |
| 2277 | tp->rcv_tstamp = tcp_time_stamp(tp); |
| 2278 | if (tp->snd_una == tp->snd_nxt && |
| 2279 | !csk_flag_nochk(csk, CSK_TX_FAILOVER)) |
| 2280 | csk_reset_flag(csk, CSK_TX_WAIT_IDLE); |
| 2281 | } |
| 2282 | } |
| 2283 | |
| 2284 | if (hdr->seq_vld & CPL_FW4_ACK_FLAGS_CH) { |
| 2285 | unsigned int fclen16 = roundup(failover_flowc_wr_len, 16); |
| 2286 | |
| 2287 | csk->wr_credits -= fclen16; |
| 2288 | csk_reset_flag(csk, CSK_TX_WAIT_IDLE); |
| 2289 | csk_reset_flag(csk, CSK_TX_FAILOVER); |
| 2290 | } |
| 2291 | if (skb_queue_len(&csk->txq) && chtls_push_frames(csk, 0)) |
| 2292 | sk->sk_write_space(sk); |
| 2293 | |
| 2294 | kfree_skb(skb); |
| 2295 | } |
| 2296 | |
| 2297 | static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb) |
| 2298 | { |
| 2299 | struct cpl_fw4_ack *rpl = cplhdr(skb) + RSS_HDR; |
| 2300 | unsigned int hwtid = GET_TID(rpl); |
| 2301 | struct sock *sk; |
| 2302 | |
| 2303 | sk = lookup_tid(cdev->tids, hwtid); |
Gustavo A. R. Silva | 3d8ccf9 | 2018-04-03 15:09:12 -0500 | [diff] [blame] | 2304 | if (unlikely(!sk)) { |
| 2305 | pr_err("can't find conn. for hwtid %u.\n", hwtid); |
| 2306 | return -EINVAL; |
| 2307 | } |
Atul Gupta | cc35c88a | 2018-03-31 21:41:59 +0530 | [diff] [blame] | 2308 | process_cpl_msg(chtls_rx_ack, sk, skb); |
| 2309 | |
| 2310 | return 0; |
| 2311 | } |
| 2312 | |
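/* Dispatch table mapping CPL opcodes to their handlers in this file. */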
| 2313 | chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = { |
| 2314 | [CPL_PASS_OPEN_RPL] = chtls_pass_open_rpl, |
| 2315 | [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl, |
| 2316 | [CPL_PASS_ACCEPT_REQ] = chtls_pass_accept_req, |
| 2317 | [CPL_PASS_ESTABLISH] = chtls_pass_establish, |
| 2318 | [CPL_RX_DATA] = chtls_rx_data, |
| 2319 | [CPL_TLS_DATA] = chtls_rx_pdu, |
| 2320 | [CPL_RX_TLS_CMP] = chtls_rx_cmp, |
| 2321 | [CPL_PEER_CLOSE] = chtls_conn_cpl, |
| 2322 | [CPL_CLOSE_CON_RPL] = chtls_conn_cpl, |
| 2323 | [CPL_ABORT_REQ_RSS] = chtls_conn_cpl, |
| 2324 | [CPL_ABORT_RPL_RSS] = chtls_conn_cpl, |
| 2325 | [CPL_FW4_ACK] = chtls_wr_ack, |
| 2326 | }; |