// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	trace_tcp_cong_state_set(sk, ca_state);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}
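
/* Illustrative only: callers pass one of the TCP_CA_* states, e.g. the
 * loss handling path marks the socket with
 *
 *	tcp_set_ca_state(sk, TCP_CA_Loss);
 *
 * so the CA module's set_state() hook (if any) observes every
 * transition before icsk_ca_state is updated.
 */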

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	return 0;
}

/* Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
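
/* A minimal congestion control module sketch (illustrative only, not part
 * of this file; "example" is a hypothetical name). It reuses the exported
 * Reno helpers for the required ops and pairs register/unregister in its
 * module init/exit handlers:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *	};
 *
 *	static int __init tcp_example_module_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_module_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_module_init);
 *	module_exit(tcp_example_module_exit);
 */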

/* Replace a registered old ca with a new one.
 *
 * The new ca must have the same name as the old one that has
 * already been registered.
 */
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
	struct tcp_congestion_ops *existing;
	int ret = 0;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	existing = tcp_ca_find_key(old_ca->key);
	if (ca->key == TCP_CA_UNSPEC || !existing || strcmp(existing->name, ca->name)) {
		pr_notice("%s not registered or non-unique key\n",
			  ca->name);
		ret = -EINVAL;
	} else if (existing != old_ca) {
		pr_notice("invalid old congestion control algorithm to replace\n");
		ret = -EINVAL;
	} else {
		/* Add the new one before removing the old one to keep
		 * one implementation available all the time.
		 */
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		list_del_rcu(&existing->list);
		pr_debug("%s updated\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module or struct_ops gets removed entirely.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca) {
		strscpy(buffer, ca->name, TCP_CA_NAME_MAX);
		ret = buffer;
	}
	rcu_read_unlock();

	return ret;
}
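
/* Illustrative only: the jhash-derived key and the name are two handles
 * for the same entry, so a lookup can round-trip between them:
 *
 *	char buf[TCP_CA_NAME_MAX];
 *	bool ecn_ca = false;
 *	u32 key = tcp_ca_get_key_by_name("reno", &ecn_ca);
 *
 *	if (key != TCP_CA_UNSPEC)
 *		pr_debug("key %u maps back to %s\n", key,
 *			 tcp_ca_get_name_by_key(key, buf));
 */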

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
		   !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
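
/* Illustrative only: this is the handler behind the per-netns sysctl, e.g.
 *
 *	# sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * which ends up here with name == "cubic", auto-loading tcp_cubic.ko if
 * needed.
 */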

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
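
/* Illustrative only: this backs the read-only listing sysctl, e.g.
 *
 *	$ cat /proc/sys/net/ipv4/tcp_available_congestion_control
 *	reno cubic
 *
 * (actual output depends on which modules are built in or loaded).
 */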

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strscpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
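
/* Illustrative only: this pair backs net.ipv4.tcp_allowed_congestion_control;
 * writing a space-separated list marks those algorithms as selectable by
 * unprivileged sockets, e.g.
 *
 *	# sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 */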

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}
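
/* Illustrative only: from userspace this is reached via the TCP_CONGESTION
 * socket option, e.g.
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic",
 *		   strlen("cubic"));
 *
 * with load == true and cap_net_admin reflecting the caller's
 * capabilities.
 */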

/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. We follow RFC 2581 and also handle stretch ACKs properly.
 * We do not implement RFC 3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes
 * a stretch ACK of degree N as if N acks of degree 1 are received back to
 * back except ABC caps N to 2. Slow start exits when cwnd grows over
 * ssthresh and returns the leftover acks to adjust cwnd in congestion
 * avoidance mode.
 */
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
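
/* Worked example (illustrative): with tcp_snd_cwnd(tp) == 8,
 * snd_ssthresh == 10 and a stretch ACK covering acked == 5 packets,
 * cwnd = min(8 + 5, 10) = 10, so the window grows by 2 (assuming
 * snd_cwnd_clamp does not bind) and the remaining 3 acks are returned
 * for congestion avoidance to consume.
 */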

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternatively w)
 * for every packet that was ACKed.
 */
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
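
/* Worked example (illustrative): with w == 10, snd_cwnd_cnt == 7 and
 * acked == 5, the counter reaches 12, delta = 12 / 10 = 1, so cwnd
 * grows by one segment and snd_cwnd_cnt carries the remaining 2
 * credits forward.
 */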

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
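
/* Illustrative: a 20-segment window halves to 10; a 3-segment window
 * yields max(1, 2) = 2, the RFC 5681 lower bound.
 */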

__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};