// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * Meant to be mostly used for locally generated traffic:
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as a fallback, with a 32 bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one Round Robin 'queue' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability:
 *
 * Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect the rate limitation.
 *
 * enqueue():
 *  - lookup one RB tree (out of 1024 or more) to find the flow.
 *    If the flow does not exist, create it and add it to the tree.
 *    Add the skb to the per flow list of skbs (fifo).
 *  - Use a special fifo for high prio packets.
 *
 * dequeue(): serves flows in Round Robin.
 * Note: When a flow becomes empty, we do not immediately remove it from
 * RB trees, for performance reasons (it's expected to send additional packets,
 * or the SLAB cache will reuse the socket for another flow).
 */
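
/*
 * Illustrative configuration (values are examples, not recommendations),
 * using the iproute2 "tc" tool :
 *
 *	tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *		maxrate 1gbit
 *
 * See fq_init() for the built-in defaults.
 */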

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
	u64 time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
	return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head, tail), otherwise they are placed in a rbtree (t_root).
 */
struct fq_flow {
/* First cache line : used in fq_gc(), fq_enqueue(), fq_dequeue() */
	struct rb_root	t_root;
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* (jiffies | 1UL) when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	u32		socket_hash;	/* sk_hash */
	int		qlen;		/* number of packets in flow queue */

/* Second cache line, used in fq_dequeue() */
	int		credit;
	/* 32bit hole on 64bit arches */

	struct fq_flow	*next;		/* next pointer in RR lists */

	struct rb_node	rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
} ____cacheline_aligned_in_smp;

struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};

struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;
	u64		ktime_cache;	/* copy of last ktime_get_ns() */
	unsigned long	unthrottle_latency_ns;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_plimit;	/* max packets per flow */
	unsigned long	flow_max_rate;	/* optional max rate per flow */
	u64		ce_threshold;
	u64		horizon;	/* horizon in ns */
	u32		orphan_mask;	/* mask for orphaned skb */
	u32		low_rate_threshold;
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;
	u8		horizon_drop;
	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_throttled;
	u64		stat_ce_mark;
	u64		stat_horizon_drops;
	u64		stat_horizon_caps;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;

	u32		timer_slack;	/* hrtimer slack in ns */
	struct qdisc_watchdog watchdog;
};

/*
 * f->tail and f->age share the same location.
 * We can use the low order bit to differentiate if this location points
 * to a sk_buff or contains a jiffies value, if we force this value to be odd.
 * This assumes f->tail low order bit must be 0 since alignof(struct sk_buff) >= 2
 */
static void fq_flow_set_detached(struct fq_flow *f)
{
	f->age = jiffies | 1UL;
}

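/* Example (illustrative) : if jiffies == 0x1000, f->age becomes 0x1001.
 * A set low order bit can never appear in a valid f->tail pointer,
 * since struct sk_buff alignment is at least 2.
 */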
static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return !!(f->age & 1UL);
}

/* special value to mark a throttled flow (not on old/new list) */
static struct fq_flow throttled;

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
	return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	rb_erase(&f->rate_node, &q->delayed);
	q->throttled_flows--;
	fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = rb_entry(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct rb_node **p, *parent;
	void *tofree[FQ_GC_MAX];
	struct fq_flow *f;
	int i, fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	if (!fcnt)
		return;

	for (i = fcnt; i > 0; ) {
		f = tofree[--i];
		rb_erase(&f->fq_node, root);
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;

	kmem_cache_free_bulk(fq_flow_cachep, fcnt, tofree);
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	} else if (sk->sk_state == TCP_CLOSE) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
		/*
		 * Sockets in TCP_CLOSE are not connected.
		 * Typical use case is UDP sockets, they can send packets
		 * with sendto() to many different destinations.
		 * We probably could use a generic bit advertising
		 * non connected sockets, instead of sk_state == TCP_CLOSE,
		 * if we care enough.
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
	}

	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

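	/* Example (illustrative) : with the default fq_trees_log of 10
	 * (1024 buckets), the gc below runs once at least 2048 flows exist
	 * and more than half of them are inactive.
	 */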
	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = rb_entry(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk == sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				if (q->rate_enable)
					smp_store_release(&sk->sk_pacing_status,
							  SK_PACING_FQ);
				if (fq_flow_is_throttled(f))
					fq_flow_unset_throttled(q, f);
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	/* f->t_root is already zeroed after kmem_cache_zalloc() */

	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk == sk) {
		f->socket_hash = sk->sk_hash;
		if (q->rate_enable)
			smp_store_release(&sk->sk_pacing_status,
					  SK_PACING_FQ);
	}
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{
	struct sk_buff *skb = skb_rb_first(&flow->t_root);
	struct sk_buff *head = flow->head;

	if (!skb)
		return head;

	if (!head)
		return skb;

	if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
		return skb;
	return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
			  struct sk_buff *skb)
{
	if (skb == flow->head) {
		flow->head = skb->next;
	} else {
		rb_erase(&skb->rbnode, &flow->t_root);
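		/* skb->rbnode shares storage with skb->next/prev/dev
		 * (a union in struct sk_buff), so dev must be restored
		 * below after the rbtree removal.
		 */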
		skb->dev = qdisc_dev(sch);
	}
}

/* Remove one skb from flow queue.
 * This skb must be the return value of prior fq_peek().
 */
static void fq_dequeue_skb(struct Qdisc *sch, struct fq_flow *flow,
			   struct sk_buff *skb)
{
	fq_erase_head(sch, flow, skb);
	skb_mark_not_on_list(skb);
	flow->qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
}

static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct rb_node **p, *parent;
	struct sk_buff *head, *aux;

	head = flow->head;
	if (!head ||
	    fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
		if (!head)
			flow->head = skb;
		else
			flow->tail->next = skb;
		flow->tail = skb;
		skb->next = NULL;
		return;
	}

	p = &flow->t_root.rb_node;
	parent = NULL;

	while (*p) {
		parent = *p;
		aux = rb_to_skb(parent);
		if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&skb->rbnode, parent, p);
	rb_insert_color(&skb->rbnode, &flow->t_root);
}

static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
				     const struct fq_sched_data *q)
{
	return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
}
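
/* Example (illustrative) : with the default 10 sec horizon and
 * horizon_drop == 1, a packet timestamped more than 10 sec in the
 * future is dropped at enqueue; with horizon_drop == 0 its tstamp
 * is instead capped to (now + horizon).
 */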

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	if (!skb->tstamp) {
		fq_skb_cb(skb)->time_to_send = q->ktime_cache = ktime_get_ns();
	} else {
		/* Check if packet timestamp is too far in the future.
		 * Try our cached value first, to avoid ktime_get_ns()
		 * cost in most cases.
		 */
		if (fq_packet_beyond_horizon(skb, q)) {
			/* Refresh our cache and check another time */
			q->ktime_cache = ktime_get_ns();
			if (fq_packet_beyond_horizon(skb, q)) {
				if (q->horizon_drop) {
					q->stat_horizon_drops++;
					return qdisc_drop(skb, sch, to_free);
				}
				q->stat_horizon_caps++;
				skb->tstamp = q->ktime_cache + q->horizon;
			}
		}
		fq_skb_cb(skb)->time_to_send = skb->tstamp;
	}

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
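
/* Illustrative user-space sketch (not kernel code) : the EDT timestamp
 * consumed in fq_enqueue() can be supplied through the SO_TXTIME socket
 * option, e.g. :
 *
 *	struct sock_txtime so_txtime = {
 *		.clockid = CLOCK_MONOTONIC,	(the clock fq uses)
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_TXTIME, &so_txtime, sizeof(so_txtime));
 *
 * then each sendmsg() carries an SCM_TXTIME cmsg holding the desired
 * departure time in nanoseconds.
 */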

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	unsigned long sample;
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	/* Update unthrottle latency EWMA.
	 * This is cheap and can help diagnosing timer/latency problems.
	 */
	sample = (unsigned long)(now - q->time_next_delayed_flow);
	q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
	q->unthrottle_latency_ns += sample >> 3;
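	/* i.e. unthrottle_latency_ns = (7 * unthrottle_latency_ns + sample) / 8 */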

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		fq_flow_unset_throttled(q, f);
	}
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	unsigned long rate;
	u32 plen;
	u64 now;

	if (!sch->q.qlen)
		return NULL;

	skb = fq_peek(&q->internal);
	if (unlikely(skb)) {
		fq_dequeue_skb(sch, &q->internal, skb);
		goto out;
	}

	q->ktime_cache = now = ktime_get_ns();
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_range_ns(&q->watchdog,
							q->time_next_delayed_flow,
							q->timer_slack);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = fq_peek(f);
	if (skb) {
		u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
					     f->time_next_packet);

		if (now < time_next_packet) {
			head->first = f->next;
			f->time_next_packet = time_next_packet;
			fq_flow_set_throttled(q, f);
			goto begin;
		}
		prefetch(&skb->end);
		if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
			INET_ECN_set_ce(skb);
			q->stat_ce_mark++;
		}
		fq_dequeue_skb(sch, f, skb);
	} else {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	plen = qdisc_pkt_len(skb);
	f->credit -= plen;

	if (!q->rate_enable)
		goto out;

	rate = q->flow_max_rate;

	/* If EDT time was provided for this skb, we need to
	 * update f->time_next_packet only if this qdisc enforces
	 * a flow max rate.
	 */
	if (!skb->tstamp) {
		if (skb->sk)
			rate = min(skb->sk->sk_pacing_rate, rate);

		if (rate <= q->low_rate_threshold) {
			f->credit = 0;
		} else {
			plen = max(plen, q->quantum);
			if (f->credit > 0)
				goto out;
		}
	}
	if (rate != ~0UL) {
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			len = div64_ul(len, rate);
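		/* Example (illustrative) : plen = 1500 bytes at
		 * rate = 125000 bytes/sec (1 Mbit/s) yields
		 * len = 12000000 ns, i.e. 12 ms between packets.
		 */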
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed!
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}
		/* Account for schedule/timers drifts.
		 * f->time_next_packet was set when prior packet was sent,
		 * and current time (@now) can be too late by tens of us.
		 */
		if (f->time_next_packet)
			len -= min(len/2, now - f->time_next_packet);
		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
	struct rb_node *p = rb_first(&flow->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &flow->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = rb_entry(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = rb_entry(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_ptr(of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = rb_entry(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
	kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was set up, we can allocate memory on the right NUMA node */
	array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

static struct netlink_range_validation iq_range = {
	.max = INT_MAX,
};

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },

	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]	= { .type = NLA_U32 },
	[TCA_FQ_ORPHAN_MASK]		= { .type = NLA_U32 },
	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
	[TCA_FQ_CE_THRESHOLD]		= { .type = NLA_U32 },
	[TCA_FQ_TIMER_SLACK]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON]		= { .type = NLA_U32 },
	[TCA_FQ_HORIZON_DROP]		= { .type = NLA_U8 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_MAX + 1];
	int err, drop_count = 0;
	unsigned int drop_len = 0;
	u32 fq_log;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	fq_log = q->fq_trees_log;

	if (tb[TCA_FQ_BUCKETS_LOG]) {
		u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

		if (nval >= 1 && nval <= ilog2(256*1024))
			fq_log = nval;
		else
			err = -EINVAL;
	}
	if (tb[TCA_FQ_PLIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

	if (tb[TCA_FQ_FLOW_PLIMIT])
		q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0 && quantum <= (1 << 20)) {
			q->quantum = quantum;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
			err = -EINVAL;
		}
	}

	if (tb[TCA_FQ_INITIAL_QUANTUM])
		q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

	if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
		pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
				    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

	if (tb[TCA_FQ_FLOW_MAX_RATE]) {
		u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

		q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
	}
	if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
		q->low_rate_threshold =
			nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

	if (tb[TCA_FQ_RATE_ENABLE]) {
		u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

		if (enable <= 1)
			q->rate_enable = enable;
		else
			err = -EINVAL;
	}

	if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
		u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

		q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
	}

	if (tb[TCA_FQ_ORPHAN_MASK])
		q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

	if (tb[TCA_FQ_CE_THRESHOLD])
		q->ce_threshold = (u64)NSEC_PER_USEC *
				  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

	if (tb[TCA_FQ_TIMER_SLACK])
		q->timer_slack = nla_get_u32(tb[TCA_FQ_TIMER_SLACK]);

	if (tb[TCA_FQ_HORIZON])
		q->horizon = (u64)NSEC_PER_USEC *
			     nla_get_u32(tb[TCA_FQ_HORIZON]);

	if (tb[TCA_FQ_HORIZON_DROP])
		q->horizon_drop = nla_get_u8(tb[TCA_FQ_HORIZON_DROP]);

	if (!err) {
		sch_tree_unlock(sch);
		err = fq_resize(sch, fq_log);
		sch_tree_lock(sch);
	}
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_dequeue(sch);

		if (!skb)
			break;
		drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

	sch_tree_unlock(sch);
	return err;
}

static void fq_destroy(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);

	fq_reset(sch);
	fq_free(q->fq_root);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	int err;

	sch->limit		= 10000;
	q->flow_plimit		= 100;
	q->quantum		= 2 * psched_mtu(qdisc_dev(sch));
	q->initial_quantum	= 10 * psched_mtu(qdisc_dev(sch));
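	/* e.g. on a standard Ethernet device (1500 byte MTU, 14 byte
	 * header), psched_mtu() is 1514, so quantum defaults to 3028
	 * bytes and initial_quantum to 15140 bytes (illustrative).
	 */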
	q->flow_refill_delay	= msecs_to_jiffies(40);
	q->flow_max_rate	= ~0UL;
	q->time_next_delayed_flow = ~0ULL;
	q->rate_enable		= 1;
	q->new_flows.first	= NULL;
	q->old_flows.first	= NULL;
	q->delayed		= RB_ROOT;
	q->fq_root		= NULL;
	q->fq_trees_log		= ilog2(1024);
	q->orphan_mask		= 1024 - 1;
	q->low_rate_threshold	= 550000 / 8;

	q->timer_slack = 10 * NSEC_PER_USEC; /* 10 usec of hrtimer slack */

	q->horizon = 10ULL * NSEC_PER_SEC; /* 10 seconds */
	q->horizon_drop = 1; /* by default, drop packets beyond horizon */

	/* Default ce_threshold of 4294 seconds */
	q->ce_threshold		= (u64)NSEC_PER_USEC * ~0U;

	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

	if (opt)
		err = fq_change(sch, opt, extack);
	else
		err = fq_resize(sch, q->fq_trees_log);

	return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 ce_threshold = q->ce_threshold;
	u64 horizon = q->horizon;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

	do_div(ce_threshold, NSEC_PER_USEC);
	do_div(horizon, NSEC_PER_USEC);

	if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
	    nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
	    nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
			min_t(unsigned long, q->flow_max_rate, ~0U)) ||
	    nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
			jiffies_to_usecs(q->flow_refill_delay)) ||
	    nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
	    nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
			q->low_rate_threshold) ||
	    nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
	    nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log) ||
	    nla_put_u32(skb, TCA_FQ_TIMER_SLACK, q->timer_slack) ||
	    nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
	    nla_put_u8(skb, TCA_FQ_HORIZON_DROP, q->horizon_drop))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct tc_fq_qd_stats st;

	sch_tree_lock(sch);

	st.gc_flows		  = q->stat_gc_flows;
	st.highprio_packets	  = q->stat_internal_packets;
	st.tcp_retrans		  = 0;
	st.throttled		  = q->stat_throttled;
	st.flows_plimit		  = q->stat_flows_plimit;
	st.pkts_too_long	  = q->stat_pkts_too_long;
	st.allocation_errors	  = q->stat_allocation_errors;
	st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -
				    ktime_get_ns();
	st.flows		  = q->flows;
	st.inactive_flows	  = q->inactive_flows;
	st.throttled_flows	  = q->throttled_flows;
	st.unthrottle_latency_ns  = min_t(unsigned long,
					  q->unthrottle_latency_ns, ~0U);
	st.ce_mark		  = q->stat_ce_mark;
	st.horizon_drops	  = q->stat_horizon_drops;
	st.horizon_caps		  = q->stat_horizon_caps;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
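
/* The statistics above are reported by user space, e.g. with :
 *	tc -s qdisc show dev eth0
 * (illustrative; the exact output depends on the iproute2 version)
 */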

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
	.id		=	"fq",
	.priv_size	=	sizeof(struct fq_sched_data),

	.enqueue	=	fq_enqueue,
	.dequeue	=	fq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_init,
	.reset		=	fq_reset,
	.destroy	=	fq_destroy,
	.change		=	fq_change,
	.dump		=	fq_dump,
	.dump_stats	=	fq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_module_init(void)
{
	int ret;

	fq_flow_cachep = kmem_cache_create("fq_flow_cache",
					   sizeof(struct fq_flow),
					   0, 0, NULL);
	if (!fq_flow_cachep)
		return -ENOMEM;

	ret = register_qdisc(&fq_qdisc_ops);
	if (ret)
		kmem_cache_destroy(fq_flow_cachep);
	return ret;
}

static void __exit fq_module_exit(void)
{
	unregister_qdisc(&fq_qdisc_ops);
	kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue Packet Scheduler");