/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

int rds_tcp_keepalive(struct socket *sock)
{
	/* values below based on xs_udp_default_timeout */
	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
	int keepcnt = 5; /* number of unack'ed probes before declaring dead */
	int keepalive = 1;
	int ret = 0;

	ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				(char *)&keepalive, sizeof(keepalive));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
				(char *)&keepcnt, sizeof(keepcnt));
	if (ret < 0)
		goto bail;

	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
				(char *)&keepidle, sizeof(keepidle));
	if (ret < 0)
		goto bail;

	/* KEEPINTVL is the interval between successive probes. We follow
	 * the model in xs_tcp_finish_connecting() and re-use keepidle.
	 */
	ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
				(char *)&keepidle, sizeof(keepidle));
bail:
	return ret;
}
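
/* A minimal sketch, not part of this file, assuming the helper APIs of
 * v5.7+ kernels where kernel_setsockopt() was removed: the same tuning
 * would then be written with the sock_set_*() / tcp_sock_set_*()
 * helpers, roughly
 *
 *	sock_set_keepalive(sock->sk);
 *	tcp_sock_set_keepcnt(sock->sk, 5);
 *	tcp_sock_set_keepidle(sock->sk, 5);
 *	tcp_sock_set_keepintvl(sock->sk, 5);	(re-using keepidle, as above)
 */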

/* rds_tcp_accept_one_path(): if accepting on cp_index > 0, make sure the
 * client's ipaddr < server's ipaddr. Otherwise, close the accepted
 * socket and force a reconnect from the smaller to the larger ip addr.
 * The reason we special-case cp_index 0 is to allow the rds probe ping
 * from a node to itself to get through efficiently.
 * Since reconnects are only initiated from the node with the numerically
 * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
 * by moving them to CONNECTING in this function.
 */
static
struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
{
	int i;
	int npaths = max_t(int, 1, conn->c_npaths);

	/* for mprds, all paths MUST be initiated by the peer
	 * with the smaller address.
	 */
	if (rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) >= 0) {
		/* Make sure we initiate at least one path if this
		 * has not already been done; rds_start_mprds() will
		 * take care of additional paths, if necessary.
		 */
		if (npaths == 1)
			rds_conn_path_connect_if_down(&conn->c_path[0]);
		return NULL;
	}

	for (i = 0; i < npaths; i++) {
		struct rds_conn_path *cp = &conn->c_path[i];

		if (rds_conn_path_transition(cp, RDS_CONN_DOWN,
					     RDS_CONN_CONNECTING) ||
		    rds_conn_path_transition(cp, RDS_CONN_ERROR,
					     RDS_CONN_CONNECTING)) {
			return cp->cp_transport_data;
		}
	}
	return NULL;
}
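
/* Worked example with illustrative addresses: if the local (accepting)
 * address is 10.0.0.2 and the connecting peer is 10.0.0.1, then
 * rds_addr_cmp(&conn->c_faddr, &conn->c_laddr) < 0 and a path slot is
 * handed out above.  With the roles reversed, NULL is returned (the
 * caller resets the accepted socket) and the numerically smaller node
 * is kicked to initiate the connection itself.
 */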

void rds_tcp_set_linger(struct socket *sock)
{
	struct linger no_linger = {
		.l_onoff = 1,
		.l_linger = 0,
	};

	kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
			  (char *)&no_linger, sizeof(no_linger));
}
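
/* A minimal sketch, again assuming v5.7+ APIs: the open-coded SO_LINGER
 * setting above is equivalent to
 *
 *	sock_no_linger(sock->sk);
 *
 * i.e. linger enabled with a zero timeout, so close() sends an RST
 * instead of leaving the socket in TIME_WAIT.
 */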

int rds_tcp_accept_one(struct socket *sock)
{
	struct socket *new_sock = NULL;
	struct rds_connection *conn;
	int ret;
	struct inet_sock *inet;
	struct rds_tcp_connection *rs_tcp = NULL;
	int conn_state;
	struct rds_conn_path *cp;
	struct in6_addr *my_addr, *peer_addr;
#if !IS_ENABLED(CONFIG_IPV6)
	struct in6_addr saddr, daddr;
#endif
	int dev_if = 0;

	if (!sock) /* module unload or netns delete in progress */
		return -ENETUNREACH;

	ret = sock_create_lite(sock->sk->sk_family,
			       sock->sk->sk_type, sock->sk->sk_protocol,
			       &new_sock);
	if (ret)
		goto out;

	ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, true);
	if (ret < 0)
		goto out;

	/* sock_create_lite() does not get a hold on the owner module so we
	 * need to do it here.  Note that sock_release() uses sock->ops to
	 * determine if it needs to decrement the reference count.  So set
	 * sock->ops after calling accept() in case that fails.  And there's
	 * no need to do try_module_get() as the listener should have a hold
	 * already.
	 */
	new_sock->ops = sock->ops;
	__module_get(new_sock->ops->owner);

	ret = rds_tcp_keepalive(new_sock);
	if (ret < 0)
		goto out;

	rds_tcp_tune(new_sock);

	inet = inet_sk(new_sock->sk);

#if IS_ENABLED(CONFIG_IPV6)
	my_addr = &new_sock->sk->sk_v6_rcv_saddr;
	peer_addr = &new_sock->sk->sk_v6_daddr;
#else
	ipv6_addr_set_v4mapped(inet->inet_saddr, &saddr);
	ipv6_addr_set_v4mapped(inet->inet_daddr, &daddr);
	my_addr = &saddr;
	peer_addr = &daddr;
#endif
	rdsdebug("accepted family %d tcp %pI6c:%u -> %pI6c:%u\n",
		 sock->sk->sk_family,
		 my_addr, ntohs(inet->inet_sport),
		 peer_addr, ntohs(inet->inet_dport));

#if IS_ENABLED(CONFIG_IPV6)
	/* sk_bound_dev_if is not set if the peer address is not a
	 * link-local address.  In that case mcast_oif happens to be set,
	 * so just use it.
	 */
	if ((ipv6_addr_type(my_addr) & IPV6_ADDR_LINKLOCAL) &&
	    !(ipv6_addr_type(peer_addr) & IPV6_ADDR_LINKLOCAL)) {
		struct ipv6_pinfo *inet6;

		inet6 = inet6_sk(new_sock->sk);
		dev_if = inet6->mcast_oif;
	} else {
		dev_if = new_sock->sk->sk_bound_dev_if;
	}
#endif

	conn = rds_conn_create(sock_net(sock->sk),
			       my_addr, peer_addr,
			       &rds_tcp_transport, 0, GFP_KERNEL, dev_if);

	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto out;
	}
	/* An incoming SYN request came in, and TCP just accepted it.
	 *
	 * If the client reboots, this conn will need to be cleaned up.
	 * rds_tcp_state_change() will do that cleanup.
	 */
	rs_tcp = rds_tcp_accept_one_path(conn);
	if (!rs_tcp)
		goto rst_nsk;
	mutex_lock(&rs_tcp->t_conn_path_lock);
	cp = rs_tcp->t_cpath;
	conn_state = rds_conn_path_state(cp);
	WARN_ON(conn_state == RDS_CONN_UP);
	if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_ERROR)
		goto rst_nsk;
	if (rs_tcp->t_sock) {
		/* Duelling SYN has been handled in rds_tcp_accept_one() */
		rds_tcp_reset_callbacks(new_sock, cp);
		/* rds_connect_path_complete() marks RDS_CONN_UP */
		rds_connect_path_complete(cp, RDS_CONN_RESETTING);
	} else {
		rds_tcp_set_callbacks(new_sock, cp);
		rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
	}
	new_sock = NULL;
	ret = 0;
	if (conn->c_npaths == 0)
		rds_send_ping(cp->cp_conn, cp->cp_index);
	goto out;
rst_nsk:
	/* Reset the newly returned accept sock and bail.
	 * It is safe to set linger on new_sock because the RDS connection
	 * has not been brought up on new_sock, so no RDS-level data could
	 * be pending on it.  By setting linger, we achieve the side-effect
	 * of avoiding TIME_WAIT state on new_sock.
	 */
	rds_tcp_set_linger(new_sock);
	kernel_sock_shutdown(new_sock, SHUT_RDWR);
	ret = 0;
out:
	if (rs_tcp)
		mutex_unlock(&rs_tcp->t_conn_path_lock);
	if (new_sock)
		sock_release(new_sock);
	return ret;
}
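
/* For context, a sketch of the assumed caller (the accept worker lives
 * outside this file): the listen socket's data_ready handler queues
 * accept work, and the worker drains the accept queue by calling
 * rds_tcp_accept_one() until it fails, roughly
 *
 *	while (rds_tcp_accept_one(lsock) == 0)
 *		cond_resched();
 *
 * which is why the rst_nsk path above still returns 0: a refused accept
 * should not stop the loop from draining further pending connections.
 */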

void rds_tcp_listen_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);

	rdsdebug("listen data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	ready = sk->sk_user_data;
	if (!ready) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	/*
	 * ->sk_data_ready is also called for a newly established child socket
	 * before it has been accepted and the accepter has set up their
	 * data_ready.  We only want to queue listen work for our listening
	 * socket.
	 *
	 * (*ready)() may be null if we are racing with netns delete, and
	 * the listen socket is being torn down.
	 */
	if (sk->sk_state == TCP_LISTEN)
		rds_tcp_accept_work(sk);
	else
		ready = rds_tcp_listen_sock_def_readable(sock_net(sk));

out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (ready)
		ready(sk);
}

struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
{
	struct socket *sock = NULL;
	struct sockaddr_storage ss;
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	int addr_len;
	int ret;

	ret = sock_create_kern(net, isv6 ? PF_INET6 : PF_INET, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret < 0) {
		rdsdebug("could not create %s listener socket: %d\n",
			 isv6 ? "IPv6" : "IPv4", ret);
		goto out;
	}

	sock->sk->sk_reuse = SK_CAN_REUSE;
	rds_tcp_nonagle(sock);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	if (isv6) {
		sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_addr = in6addr_any;
		sin6->sin6_port = (__force u16)htons(RDS_TCP_PORT);
		sin6->sin6_scope_id = 0;
		sin6->sin6_flowinfo = 0;
		addr_len = sizeof(*sin6);
	} else {
		sin = (struct sockaddr_in *)&ss;
		sin->sin_family = PF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = (__force u16)htons(RDS_TCP_PORT);
		addr_len = sizeof(*sin);
	}

	ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
	if (ret < 0) {
		rdsdebug("could not bind %s listener socket: %d\n",
			 isv6 ? "IPv6" : "IPv4", ret);
		goto out;
	}

	ret = sock->ops->listen(sock, 64);
	if (ret < 0)
		goto out;

	return sock;
out:
	if (sock)
		sock_release(sock);
	return NULL;
}
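
/* A sketch of expected use, assuming the per-netns init code as the
 * caller: try an IPv6 listener first (which also accepts v4-mapped
 * peers) and fall back to an IPv4-only listener if IPv6 is unavailable.
 *
 *	sock = rds_tcp_listen_init(net, true);
 *	if (!sock)
 *		sock = rds_tcp_listen_init(net, false);
 *	if (!sock)
 *		goto fail;
 */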

void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
{
	struct sock *sk;

	if (!sock)
		return;

	sk = sock->sk;

	/* serialize with and prevent further callbacks */
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_user_data) {
		sk->sk_data_ready = sk->sk_user_data;
		sk->sk_user_data = NULL;
	}
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	/* wait for accepts to stop and close the socket */
	flush_workqueue(rds_wq);
	flush_work(acceptor);
	sock_release(sock);
}