Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 1 | /* |
| 2 | * TCP Veno congestion control |
| 3 | * |
| 4 | * This is based on the congestion detection/avoidance scheme described in |
| 5 | * C. P. Fu, S. C. Liew. |
| 6 | * "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks." |
| 7 | * IEEE Journal on Selected Areas in Communication, |
| 8 | * Feb. 2003. |
Justin P. Mattock | 631dd1a | 2010-10-18 11:03:14 +0200 | [diff] [blame] | 9 | * See http://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 12 | #include <linux/mm.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/skbuff.h> |
| 15 | #include <linux/inet_diag.h> |
| 16 | |
| 17 | #include <net/tcp.h> |
| 18 | |
/* Default values of the Veno variables, in fixed-point representation
 * with V_PARAM_SHIFT bits to the right of the binary point.
 */
#define V_PARAM_SHIFT 1
/* beta: backlog threshold (in packets, fixed-point) that separates the
 * "non-congestive" and "congestive" states in the Veno paper (N < beta
 * means losses are presumed random, not congestion-induced).
 */
static const int beta = 3 << V_PARAM_SHIFT;
| 24 | |
| 25 | /* Veno variables */ |
/* Veno per-connection state, kept in the inet_csk CA private area. */
struct veno {
	u8 doing_veno_now;	/* if true, do veno for this rtt */
	u16 cntrtt;		/* # of rtts measured within last rtt */
	u32 minrtt;		/* min of rtts measured within last rtt (in usec) */
	u32 basertt;		/* the min of all Veno rtt measurements seen (in usec) */
	u32 inc;		/* decide whether to increase cwnd */
	u32 diff;		/* calculate the diff rate */
	u32 loss_cwnd;		/* cwnd when loss occurred */
};
| 35 | |
| 36 | /* There are several situations when we must "re-start" Veno: |
| 37 | * |
| 38 | * o when a connection is established |
| 39 | * o after an RTO |
| 40 | * o after fast recovery |
| 41 | * o when we send a packet and there is no outstanding |
| 42 | * unacknowledged data (restarting an idle connection) |
| 43 | * |
| 44 | */ |
| 45 | static inline void veno_enable(struct sock *sk) |
| 46 | { |
| 47 | struct veno *veno = inet_csk_ca(sk); |
| 48 | |
| 49 | /* turn on Veno */ |
| 50 | veno->doing_veno_now = 1; |
| 51 | |
| 52 | veno->minrtt = 0x7fffffff; |
| 53 | } |
| 54 | |
| 55 | static inline void veno_disable(struct sock *sk) |
| 56 | { |
| 57 | struct veno *veno = inet_csk_ca(sk); |
| 58 | |
| 59 | /* turn off Veno */ |
| 60 | veno->doing_veno_now = 0; |
| 61 | } |
| 62 | |
| 63 | static void tcp_veno_init(struct sock *sk) |
| 64 | { |
| 65 | struct veno *veno = inet_csk_ca(sk); |
| 66 | |
| 67 | veno->basertt = 0x7fffffff; |
| 68 | veno->inc = 1; |
| 69 | veno_enable(sk); |
| 70 | } |
| 71 | |
| 72 | /* Do rtt sampling needed for Veno. */ |
Lawrence Brakmo | 756ee17 | 2016-05-11 10:02:13 -0700 | [diff] [blame] | 73 | static void tcp_veno_pkts_acked(struct sock *sk, |
| 74 | const struct ack_sample *sample) |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 75 | { |
| 76 | struct veno *veno = inet_csk_ca(sk); |
Stephen Hemminger | 164891a | 2007-04-23 22:26:16 -0700 | [diff] [blame] | 77 | u32 vrtt; |
| 78 | |
Lawrence Brakmo | 756ee17 | 2016-05-11 10:02:13 -0700 | [diff] [blame] | 79 | if (sample->rtt_us < 0) |
Ilpo Järvinen | b9ce204 | 2007-06-15 15:08:43 -0700 | [diff] [blame] | 80 | return; |
| 81 | |
Stephen Hemminger | 164891a | 2007-04-23 22:26:16 -0700 | [diff] [blame] | 82 | /* Never allow zero rtt or baseRTT */ |
Lawrence Brakmo | 756ee17 | 2016-05-11 10:02:13 -0700 | [diff] [blame] | 83 | vrtt = sample->rtt_us + 1; |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 84 | |
| 85 | /* Filter to find propagation delay: */ |
| 86 | if (vrtt < veno->basertt) |
| 87 | veno->basertt = vrtt; |
| 88 | |
| 89 | /* Find the min rtt during the last rtt to find |
| 90 | * the current prop. delay + queuing delay: |
| 91 | */ |
| 92 | veno->minrtt = min(veno->minrtt, vrtt); |
| 93 | veno->cntrtt++; |
| 94 | } |
| 95 | |
| 96 | static void tcp_veno_state(struct sock *sk, u8 ca_state) |
| 97 | { |
| 98 | if (ca_state == TCP_CA_Open) |
| 99 | veno_enable(sk); |
| 100 | else |
| 101 | veno_disable(sk); |
| 102 | } |
| 103 | |
| 104 | /* |
| 105 | * If the connection is idle and we are restarting, |
| 106 | * then we don't want to do any Veno calculations |
| 107 | * until we get fresh rtt samples. So when we |
| 108 | * restart, we reset our Veno state to a clean |
| 109 | * state. After we get acks for this flight of |
| 110 | * packets, _then_ we can make Veno calculations |
| 111 | * again. |
| 112 | */ |
| 113 | static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event) |
| 114 | { |
| 115 | if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) |
| 116 | tcp_veno_init(sk); |
| 117 | } |
| 118 | |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 119 | static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 120 | { |
| 121 | struct tcp_sock *tp = tcp_sk(sk); |
| 122 | struct veno *veno = inet_csk_ca(sk); |
| 123 | |
Harvey Harrison | ab59859 | 2008-05-01 02:47:38 -0700 | [diff] [blame] | 124 | if (!veno->doing_veno_now) { |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 125 | tcp_reno_cong_avoid(sk, ack, acked); |
Harvey Harrison | ab59859 | 2008-05-01 02:47:38 -0700 | [diff] [blame] | 126 | return; |
| 127 | } |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 128 | |
| 129 | /* limited by applications */ |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 130 | if (!tcp_is_cwnd_limited(sk)) |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 131 | return; |
| 132 | |
| 133 | /* We do the Veno calculations only if we got enough rtt samples */ |
| 134 | if (veno->cntrtt <= 2) { |
| 135 | /* We don't have enough rtt samples to do the Veno |
| 136 | * calculation, so we'll behave like Reno. |
| 137 | */ |
Eric Dumazet | 2490155 | 2014-05-02 21:18:05 -0700 | [diff] [blame] | 138 | tcp_reno_cong_avoid(sk, ack, acked); |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 139 | } else { |
Lachlan Andrew | 1591311 | 2008-04-30 01:04:03 -0700 | [diff] [blame] | 140 | u64 target_cwnd; |
| 141 | u32 rtt; |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 142 | |
| 143 | /* We have enough rtt samples, so, using the Veno |
| 144 | * algorithm, we determine the state of the network. |
| 145 | */ |
| 146 | |
| 147 | rtt = veno->minrtt; |
| 148 | |
Christoph Paasch | 45a0769 | 2014-07-29 12:07:27 +0200 | [diff] [blame] | 149 | target_cwnd = (u64)tp->snd_cwnd * veno->basertt; |
Lachlan Andrew | 1591311 | 2008-04-30 01:04:03 -0700 | [diff] [blame] | 150 | target_cwnd <<= V_PARAM_SHIFT; |
| 151 | do_div(target_cwnd, rtt); |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 152 | |
| 153 | veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd; |
| 154 | |
Yuchung Cheng | 071d508 | 2015-07-09 13:16:29 -0700 | [diff] [blame] | 155 | if (tcp_in_slow_start(tp)) { |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 156 | /* Slow start. */ |
Yuchung Cheng | 9f9843a7 | 2013-10-31 11:07:31 -0700 | [diff] [blame] | 157 | tcp_slow_start(tp, acked); |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 158 | } else { |
| 159 | /* Congestion avoidance. */ |
| 160 | if (veno->diff < beta) { |
| 161 | /* In the "non-congestive state", increase cwnd |
| 162 | * every rtt. |
| 163 | */ |
Neal Cardwell | e73ebb0 | 2015-01-28 20:01:35 -0500 | [diff] [blame] | 164 | tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1); |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 165 | } else { |
| 166 | /* In the "congestive state", increase cwnd |
| 167 | * every other rtt. |
| 168 | */ |
| 169 | if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { |
Joe Perches | 9d4fb27 | 2009-11-23 10:41:23 -0800 | [diff] [blame] | 170 | if (veno->inc && |
| 171 | tp->snd_cwnd < tp->snd_cwnd_clamp) { |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 172 | tp->snd_cwnd++; |
| 173 | veno->inc = 0; |
| 174 | } else |
| 175 | veno->inc = 1; |
| 176 | tp->snd_cwnd_cnt = 0; |
| 177 | } else |
| 178 | tp->snd_cwnd_cnt++; |
| 179 | } |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 180 | } |
| 181 | if (tp->snd_cwnd < 2) |
| 182 | tp->snd_cwnd = 2; |
| 183 | else if (tp->snd_cwnd > tp->snd_cwnd_clamp) |
| 184 | tp->snd_cwnd = tp->snd_cwnd_clamp; |
| 185 | } |
| 186 | /* Wipe the slate clean for the next rtt. */ |
| 187 | /* veno->cntrtt = 0; */ |
| 188 | veno->minrtt = 0x7fffffff; |
| 189 | } |
| 190 | |
| 191 | /* Veno MD phase */ |
| 192 | static u32 tcp_veno_ssthresh(struct sock *sk) |
| 193 | { |
| 194 | const struct tcp_sock *tp = tcp_sk(sk); |
| 195 | struct veno *veno = inet_csk_ca(sk); |
| 196 | |
Florian Westphal | 85f7e75 | 2016-11-21 14:18:37 +0100 | [diff] [blame] | 197 | veno->loss_cwnd = tp->snd_cwnd; |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 198 | if (veno->diff < beta) |
| 199 | /* in "non-congestive state", cut cwnd by 1/5 */ |
| 200 | return max(tp->snd_cwnd * 4 / 5, 2U); |
| 201 | else |
| 202 | /* in "congestive state", cut cwnd by 1/2 */ |
| 203 | return max(tp->snd_cwnd >> 1U, 2U); |
| 204 | } |
| 205 | |
Florian Westphal | 85f7e75 | 2016-11-21 14:18:37 +0100 | [diff] [blame] | 206 | static u32 tcp_veno_cwnd_undo(struct sock *sk) |
| 207 | { |
| 208 | const struct veno *veno = inet_csk_ca(sk); |
| 209 | |
| 210 | return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd); |
| 211 | } |
| 212 | |
Stephen Hemminger | a252beb | 2011-03-10 00:40:17 -0800 | [diff] [blame] | 213 | static struct tcp_congestion_ops tcp_veno __read_mostly = { |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 214 | .init = tcp_veno_init, |
| 215 | .ssthresh = tcp_veno_ssthresh, |
Florian Westphal | 85f7e75 | 2016-11-21 14:18:37 +0100 | [diff] [blame] | 216 | .undo_cwnd = tcp_veno_cwnd_undo, |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 217 | .cong_avoid = tcp_veno_cong_avoid, |
Stephen Hemminger | 164891a | 2007-04-23 22:26:16 -0700 | [diff] [blame] | 218 | .pkts_acked = tcp_veno_pkts_acked, |
Bin Zhou | 76f1017 | 2006-06-05 17:28:30 -0700 | [diff] [blame] | 219 | .set_state = tcp_veno_state, |
| 220 | .cwnd_event = tcp_veno_cwnd_event, |
| 221 | |
| 222 | .owner = THIS_MODULE, |
| 223 | .name = "veno", |
| 224 | }; |
| 225 | |
/* Module entry point: register Veno with the congestion-control core. */
static int __init tcp_veno_register(void)
{
	/* The private CA state must fit in the space inet_csk reserves. */
	BUILD_BUG_ON(sizeof(struct veno) > ICSK_CA_PRIV_SIZE);
	tcp_register_congestion_control(&tcp_veno);
	return 0;
}
| 232 | |
/* Module exit point: remove Veno from the list of available CAs. */
static void __exit tcp_veno_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_veno);
}
| 237 | |
/* Standard module boilerplate. */
module_init(tcp_veno_register);
module_exit(tcp_veno_unregister);

MODULE_AUTHOR("Bin Zhou, Cheng Peng Fu");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Veno");