// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP Illinois congestion control.
 * Home page:
 *	http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
 *
 * The algorithm is described in:
 * "TCP-Illinois: A Loss and Delay-Based Congestion Control Algorithm
 *  for High-Speed Networks"
 * http://tamerbasar.csl.illinois.edu/LiuBasarSrikantPerfEvalArtJun2008.pdf
 *
 * Implemented from description in paper and ns-2 simulation.
 * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
 */

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <asm/div64.h>
#include <net/tcp.h>

#define ALPHA_SHIFT	7
#define ALPHA_SCALE	(1u<<ALPHA_SHIFT)
#define ALPHA_MIN	((3*ALPHA_SCALE)/10)	/* ~0.3 */
#define ALPHA_MAX	(10*ALPHA_SCALE)	/* 10.0 */
#define ALPHA_BASE	ALPHA_SCALE		/* 1.0 */
#define RTT_MAX		(U32_MAX / ALPHA_MAX)	/* 3.3 secs */

#define BETA_SHIFT	6
#define BETA_SCALE	(1u<<BETA_SHIFT)
#define BETA_MIN	(BETA_SCALE/8)	/* 0.125 */
#define BETA_MAX	(BETA_SCALE/2)	/* 0.5 */
#define BETA_BASE	BETA_MAX
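
/*
 * Illustrative note (values derived from the constants above, not from the
 * original comments): alpha is kept in fixed point with ALPHA_SCALE = 128
 * representing 1.0, so ALPHA_MIN = 38 (~0.3), ALPHA_BASE = 128 (1.0) and
 * ALPHA_MAX = 1280 (10.0).  beta uses BETA_SCALE = 64 for 1.0, giving
 * BETA_MIN = 8 (0.125) and BETA_MAX = 32 (0.5).  RTT samples are clamped to
 * RTT_MAX (~3.3 s) so the 32-bit alpha arithmetic cannot wrap around.
 */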

static int win_thresh __read_mostly = 15;
module_param(win_thresh, int, 0);
MODULE_PARM_DESC(win_thresh, "Window threshold for starting adaptive sizing");

static int theta __read_mostly = 5;
module_param(theta, int, 0);
MODULE_PARM_DESC(theta, "# of fast RTT's before full growth");

/* TCP Illinois Parameters */
struct illinois {
	u64	sum_rtt;	/* sum of rtt's measured within last rtt */
	u16	cnt_rtt;	/* # of rtts measured within last rtt */
	u32	base_rtt;	/* min of all rtt in usec */
	u32	max_rtt;	/* max of all rtt in usec */
	u32	end_seq;	/* right edge of current RTT */
	u32	alpha;		/* Additive increase */
	u32	beta;		/* Multiplicative decrease */
	u16	acked;		/* # packets acked by current ACK */
	u8	rtt_above;	/* average rtt has gone above threshold */
	u8	rtt_low;	/* # of rtt measurements below threshold */
};

static void rtt_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	ca->end_seq = tp->snd_nxt;
	ca->cnt_rtt = 0;
	ca->sum_rtt = 0;

	/* TODO: age max_rtt? */
}

static void tcp_illinois_init(struct sock *sk)
{
	struct illinois *ca = inet_csk_ca(sk);

	ca->alpha = ALPHA_MAX;
	ca->beta = BETA_BASE;
	ca->base_rtt = 0x7fffffff;
	ca->max_rtt = 0;

	ca->acked = 0;
	ca->rtt_low = 0;
	ca->rtt_above = 0;

	rtt_reset(sk);
}

/* Measure RTT for each ack. */
static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
{
	struct illinois *ca = inet_csk_ca(sk);
	s32 rtt_us = sample->rtt_us;

	ca->acked = sample->pkts_acked;

	/* dup ack, no rtt sample */
	if (rtt_us < 0)
		return;

	/* ignore bogus values, this prevents wraparound in alpha math */
	if (rtt_us > RTT_MAX)
		rtt_us = RTT_MAX;

	/* keep track of minimum RTT seen so far */
	if (ca->base_rtt > rtt_us)
		ca->base_rtt = rtt_us;

	/* and max */
	if (ca->max_rtt < rtt_us)
		ca->max_rtt = rtt_us;

	++ca->cnt_rtt;
	ca->sum_rtt += rtt_us;
}

/* Maximum queuing delay */
static inline u32 max_delay(const struct illinois *ca)
{
	return ca->max_rtt - ca->base_rtt;
}

/* Average queuing delay */
static inline u32 avg_delay(const struct illinois *ca)
{
	u64 t = ca->sum_rtt;

	do_div(t, ca->cnt_rtt);
	return t - ca->base_rtt;
}
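
/*
 * Worked example (illustrative numbers only): with base_rtt = 50000 usec,
 * max_rtt = 80000 usec and ten samples summing to 600000 usec in the last
 * window, avg_delay() returns 600000/10 - 50000 = 10000 usec of average
 * queuing delay and max_delay() returns 80000 - 50000 = 30000 usec.
 */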

/*
 * Compute value of alpha used for additive increase.
 * If the window is small, use 1.0, equivalent to Reno.
 *
 * For larger windows, adjust based on average delay.
 * A. If average delay is at minimum (we are uncongested),
 *    then use a large alpha (10.0) to increase faster.
 * B. If average delay is at maximum (getting congested),
 *    then use a small alpha (0.3).
 *
 * The result is a convex window growth curve.
 */
static u32 alpha(struct illinois *ca, u32 da, u32 dm)
{
	u32 d1 = dm / 100;	/* Low threshold */

	if (da <= d1) {
		/* If never got out of low delay zone, then use max */
		if (!ca->rtt_above)
			return ALPHA_MAX;

		/* Wait for theta (default 5) good RTTs before letting alpha
		 * return to ALPHA_MAX.  This prevents a single good RTT from
		 * causing a sudden window increase.
		 */
		if (++ca->rtt_low < theta)
			return ca->alpha;

		ca->rtt_low = 0;
		ca->rtt_above = 0;
		return ALPHA_MAX;
	}

	ca->rtt_above = 1;

	/*
	 * Based on:
	 *
	 *      (dm - d1) amin amax
	 * k1 = -------------------
	 *         amax - amin
	 *
	 *       (dm - d1) amin
	 * k2 = ---------------- - d1
	 *         amax - amin
	 *
	 *             k1
	 * alpha = ----------
	 *          k2 + da
	 */

	dm -= d1;
	da -= d1;
	return (dm * ALPHA_MAX) /
		(dm + (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN);
}
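
/*
 * Worked example of the curve above (illustrative numbers only): with
 * dm = 100000 usec and da = 20000 usec, d1 = 1000, so after subtracting d1
 * we have dm = 99000 and da = 19000.  Then
 *   (da * (ALPHA_MAX - ALPHA_MIN)) / ALPHA_MIN = 19000 * 1242 / 38 = 621000
 *   alpha = (99000 * 1280) / (99000 + 621000) = 176, i.e. 176/128 ~= 1.4
 * At the extremes, da == d1 yields ALPHA_MAX (10.0) and da == dm yields
 * ALPHA_MIN (~0.3), matching cases A and B in the comment above alpha().
 */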

/*
 * Beta used for multiplicative decrease.
 * For small window sizes, return the same value as Reno (0.5).
 *
 * If delay is small (at most 10% of max), then beta = 1/8.
 * If delay is at least 80% of max, then beta = 1/2.
 * In between, beta is a linear function of the delay.
 */
static u32 beta(u32 da, u32 dm)
{
	u32 d2, d3;

	d2 = dm / 10;
	if (da <= d2)
		return BETA_MIN;

	d3 = (8 * dm) / 10;
	if (da >= d3 || d3 <= d2)
		return BETA_MAX;

	/*
	 * Based on:
	 *
	 *       bmin d3 - bmax d2
	 * k3 = -------------------
	 *            d3 - d2
	 *
	 *       bmax - bmin
	 * k4 = -------------
	 *         d3 - d2
	 *
	 * b = k3 + k4 da
	 */
	return (BETA_MIN * d3 - BETA_MAX * d2 + (BETA_MAX - BETA_MIN) * da)
		/ (d3 - d2);
}
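
/*
 * Worked example of the linear region (illustrative numbers only): with
 * dm = 100000 usec, d2 = 10000 and d3 = 80000.  For da = 45000, the midpoint
 * of [d2, d3]:
 *   beta = (8 * 80000 - 32 * 10000 + (32 - 8) * 45000) / 70000 = 20
 * i.e. 20/64 = 0.3125, halfway between BETA_MIN (0.125) and BETA_MAX (0.5).
 */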

/* Update alpha and beta values once per RTT */
static void update_params(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (tcp_snd_cwnd(tp) < win_thresh) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
	} else if (ca->cnt_rtt > 0) {
		u32 dm = max_delay(ca);
		u32 da = avg_delay(ca);

		ca->alpha = alpha(ca, da, dm);
		ca->beta = beta(da, dm);
	}

	rtt_reset(sk);
}

/*
 * In case of loss, reset to default values
 */
static void tcp_illinois_state(struct sock *sk, u8 new_state)
{
	struct illinois *ca = inet_csk_ca(sk);

	if (new_state == TCP_CA_Loss) {
		ca->alpha = ALPHA_BASE;
		ca->beta = BETA_BASE;
		ca->rtt_low = 0;
		ca->rtt_above = 0;
		rtt_reset(sk);
	}
}

/*
 * Increase window in response to successful acknowledgment.
 */
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);

	if (after(ack, ca->end_seq))
		update_params(sk);

	/* RFC 2861: only increase cwnd if the window is fully utilized */
	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In slow start */
	if (tcp_in_slow_start(tp))
		tcp_slow_start(tp, acked);

	else {
		u32 delta;

		/* snd_cwnd_cnt is # of packets since last cwnd increment */
		tp->snd_cwnd_cnt += ca->acked;
		ca->acked = 1;

		/* This is a close approximation of:
		 * tp->snd_cwnd += alpha/tp->snd_cwnd
		 */
		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
		if (delta >= tcp_snd_cwnd(tp)) {
			tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + delta / tcp_snd_cwnd(tp),
						 (u32)tp->snd_cwnd_clamp));
			tp->snd_cwnd_cnt = 0;
		}
	}
}
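
/*
 * Worked example of the increase above (illustrative numbers only): with
 * alpha = ALPHA_MAX (1280, i.e. 10.0) and tcp_snd_cwnd(tp) = 100, delta
 * reaches 100 after 10 newly acked packets (10 * 1280 >> 7 = 100), so cwnd
 * grows by one segment per 10 ACKed packets, roughly 10 segments per RTT.
 * With alpha = ALPHA_BASE (128, i.e. 1.0) the same arithmetic degrades to
 * Reno's one segment per RTT.
 */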

static u32 tcp_illinois_ssthresh(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct illinois *ca = inet_csk_ca(sk);
	u32 decr;

	/* Multiplicative decrease */
	decr = (tcp_snd_cwnd(tp) * ca->beta) >> BETA_SHIFT;
	return max(tcp_snd_cwnd(tp) - decr, 2U);
}
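
/*
 * Worked example (illustrative numbers only): with tcp_snd_cwnd(tp) = 100,
 * beta = BETA_MAX (32) gives decr = 100 * 32 >> 6 = 50, the Reno-style
 * halving, while beta = BETA_MIN (8) gives decr = 12 and a gentler backoff
 * to 88 when queuing delay was small before the loss.
 */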

/* Extract congestion control state for TCP socket info reported via netlink. */
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
				union tcp_cc_info *info)
{
	const struct illinois *ca = inet_csk_ca(sk);

	if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
		info->vegas.tcpv_enabled = 1;
		info->vegas.tcpv_rttcnt = ca->cnt_rtt;
		info->vegas.tcpv_minrtt = ca->base_rtt;
		info->vegas.tcpv_rtt = 0;

		if (info->vegas.tcpv_rttcnt > 0) {
			u64 t = ca->sum_rtt;

			do_div(t, info->vegas.tcpv_rttcnt);
			info->vegas.tcpv_rtt = t;
		}
		*attr = INET_DIAG_VEGASINFO;
		return sizeof(struct tcpvegas_info);
	}
	return 0;
}

static struct tcp_congestion_ops tcp_illinois __read_mostly = {
	.init		= tcp_illinois_init,
	.ssthresh	= tcp_illinois_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_illinois_cong_avoid,
	.set_state	= tcp_illinois_state,
	.get_info	= tcp_illinois_info,
	.pkts_acked	= tcp_illinois_acked,

	.owner		= THIS_MODULE,
	.name		= "illinois",
};
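
/*
 * Typical usage, assuming the module is built (CONFIG_TCP_CONG_ILLINOIS=m):
 *
 *   modprobe tcp_illinois
 *   sysctl -w net.ipv4.tcp_congestion_control=illinois
 *
 * or per socket:
 *
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "illinois", 8);
 */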

static int __init tcp_illinois_register(void)
{
	BUILD_BUG_ON(sizeof(struct illinois) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_illinois);
}

static void __exit tcp_illinois_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_illinois);
}

module_init(tcp_illinois_register);
module_exit(tcp_illinois_unregister);

MODULE_AUTHOR("Stephen Hemminger, Shao Liu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Illinois");
MODULE_VERSION("1.0");