// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */
/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
  one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
                                 ^.__grp->index = 0
                                 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax = 1<<MTU_SHIFT and w_min = 1.
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
  so we can do F = S + len * inv_w * wsum.
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
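
/* A worked example of the fixed-point arithmetic above (illustrative
 * numbers only, not taken from the code): with FRAC_BITS = 30, a class
 * with weight w_i = 10 stores inv_w = ONE_FP/10 = 107374182. A
 * 1500-byte packet then advances the flow finish time by
 * len * inv_w = 1500 * 107374182, which is (up to rounding)
 * 150 << FRAC_BITS, i.e. the scaled representation of
 * len/w_i = 150 units of virtual time.
 */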

/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */

/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };

struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int filter_cnt;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32	class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int	lmax;

	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
	u32	budgetmax;  /* Max budget for this aggregate. */
	u32	initial_budget, budget;     /* Initial and current budget. */

	int		  num_classes;	/* Number of classes in this aggr. */
	struct list_head  active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block	*block;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			wsum;		/* weight sum */
	u32			iwsum;		/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};

/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be scheduled
 *	    for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};

static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static struct netlink_range_validation lmax_range = {
	.min = QFQ_MIN_LMAX,
	.max = QFQ_MAX_LMAX,
};

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
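
/* For intuition, a sketch of the index computation with concrete
 * numbers (assuming min_slot_shift = FRAC_BITS + QFQ_MTU_SHIFT -
 * QFQ_MAX_INDEX = 22, the value set up at qdisc init): for weight 1,
 * inv_w = ONE_FP = 2^30, and maxlen = 1500 gives slot_size =
 * 1500 * 2^30. Then size_map = slot_size >> 22 = 1500 * 2^8 = 384000,
 * __fls(384000) = 18, so index = 19; since slot_size is not exactly
 * 2^(19 + 22 - 1), the power-of-two correction is 0 and the flow
 * lands in group 19.
 */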

static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold; we take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}
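
/* Numeric sketch of the update above (illustrative values): an
 * aggregate whose classes each have weight 10 grows from 2 to 3
 * classes. Its DRR budget becomes budgetmax = 3 * lmax, its inverse
 * weight becomes inv_w = ONE_FP/30, and the scheduler-wide weight
 * sum grows by 10, with iwsum recomputed accordingly.
 */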

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;


	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			  u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg;

	/* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */
	if (lmax > QFQ_MAX_LMAX)
		return -EINVAL;

	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}

static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
		NL_SET_ERR_MSG_MOD(extack, "missing options");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
					  qfq_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT])
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
	else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
	} else {
		/* MTU size is user controlled */
		lmax = psched_mtu(qdisc_dev(sch));
		if (lmax < QFQ_MIN_LMAX || lmax > QFQ_MAX_LMAX) {
			NL_SET_ERR_MSG_MOD(extack,
					   "MTU size out of bounds for qfq");
			return -EINVAL;
		}
	}

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "total weight out of range (%d + %u)\n",
				       delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					true,
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	else
		qdisc_class_hash_insert(&q->clhash, &cl->common);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);
	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_put(cl->qdisc);
	kfree(cl);
	return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_put(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
			    struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    qdisc_qstats_copy(d, cl->qdisc) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
				return;
		}
	}
}

static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}

/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
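
/* A sketch of why the cast above handles wraparound (illustrative
 * values): with a = 1 and b = ~0ULL, i.e. b taken just before the
 * u64 counter wrapped, a - b computes to 2 in modular arithmetic, so
 * (s64)(a - b) > 0 and a is correctly considered "later" than b,
 * even though a < b as plain unsigned values.
 */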

/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}
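
/* For example (illustrative values): mask_from(0b101101, 3) clears
 * bits 0-2 and returns 0b101000; a subsequent __ffs() then yields 3,
 * i.e. the first set bit at position >= 3, mimicking an ffs_from().
 */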

/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}
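
/* Reading the encoding above (a recap of the logic, not new
 * behavior): the eligibility test contributes bit 0 (0 = eligible,
 * 1 = ineligible) and the blocking test ORs in bit 1 (EB = 2), so
 * the four possible results map exactly onto ER = 0, IR = 1, EB = 2
 * and IB = 3.
 */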

/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
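
/* A concrete instance of the flip computation above (illustrative
 * numbers): if old_vslot = 0b0111 and vslot = 0b1000, then
 * vslot ^ old_vslot = 0b1111 and fls() returns 4, so mask = 0b1111
 * and groups 0-3 move from the ineligible to the eligible bitmaps.
 */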

/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * used to handle also the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			     ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
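
/* Sanity check of the worst-case figure quoted above (illustrative
 * arithmetic): with QFQ_MAX_AGG_CLASSES = 8 and (1<<QFQ_MTU_SHIFT) /
 * QFQ_MIN_LMAX = 65536/512 = 128, the out-of-order bound evaluates
 * to at most 2 + 8 * 128 * 1 = 1026 slots, far above
 * QFQ_MAX_SLOTS-2 = 30; hence the explicit capping performed by
 * qfq_slot_insert().
 */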

/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time?
 * Here too we should make sure that i is less than 32.
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}

static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
			struct qfq_class *cl, unsigned int len)
{
	qdisc_dequeue_peeked(cl->qdisc);

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}
}

static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* Compute the service received by the aggregate, taking into
	 * account that, after decreasing the number of classes in
	 * agg, it may happen that
	 * agg->initial_budget - agg->budget > agg->budgetmax
	 */
	u32 service_received = min(agg->budgetmax,
				   agg->initial_budget - agg->budget);

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}
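
/* Worked instance of the update above (illustrative values): an
 * aggregate with inv_w = ONE_FP/20 that consumed 3000 bytes of its
 * budget gets F = S + 3000 * ONE_FP/20, i.e. S advanced by the
 * fixed-point encoding of 3000/20 = 150 units of virtual time.
 */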

/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		  struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
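
/* Budget-to-virtual-time sketch for the assignment above
 * (illustrative values): an aggregate of 2 classes, each with weight
 * 5 and lmax = 1500, has budgetmax = 3000 and inv_w = ONE_FP/10, so
 * its provisional finish time is F = S + 3000 * ONE_FP/10, i.e. S
 * plus 300 units of virtual time in fixed-point form.
 */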

static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
	struct qfq_class *cl;
	struct sk_buff *skb = NULL;
	/* next-packet len, 0 means no more active classes in in-service agg */
	unsigned int len = 0;

	if (in_serv_agg == NULL)
		return NULL;

	if (!list_empty(&in_serv_agg->active))
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);

	/*
	 * If there are no active classes in the in-service aggregate,
	 * or if the aggregate does not have enough budget to serve its
	 * next class, then choose the next aggregate to serve.
	 */
	if (len == 0 || in_serv_agg->budget < len) {
		charge_actual_service(in_serv_agg);

		/* recharge the budget of the aggregate */
		in_serv_agg->initial_budget = in_serv_agg->budget =
			in_serv_agg->budgetmax;

		if (!list_empty(&in_serv_agg->active)) {
			/*
			 * Still active: reschedule for
			 * service. Possible optimization: if no other
			 * aggregate is active, then there is no point
			 * in rescheduling this aggregate, and we can
			 * just keep it as the in-service one. However,
			 * this should be a corner case, and to handle
			 * it, we would need to maintain an extra
			 * num_active_aggs field.
			 */
			qfq_update_agg_ts(q, in_serv_agg, requeue);
			qfq_schedule_agg(q, in_serv_agg);
		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
			q->in_serv_agg = NULL;
			return NULL;
		}

		/*
		 * If we get here, there are other aggregates queued:
		 * choose the new aggregate to serve.
		 */
		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
	}
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	agg_dequeue(in_serv_agg, cl, len);
	/* If lmax is lowered, through qfq_change_class, for a class
	 * owning pending packets with larger size than the new value
	 * of lmax, then the following condition may hold.
	 */
	if (unlikely(in_serv_agg->budget < len))
		in_serv_agg->budget = 0;
	else
		in_serv_agg->budget -= len;

	q->V += (u64)len * q->iwsum;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) in_serv_agg->F,
		 (unsigned long long) q->V);

	return skb;
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
	struct qfq_group *grp;
	struct qfq_aggregate *agg, *new_front_agg;
	u64 old_F;

	qfq_update_eligible(q);
	q->oldV = q->V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);
	old_F = grp->F;

	agg = qfq_slot_head(grp);

	/* agg starts to be served, remove it from schedule */
	qfq_front_slot_remove(grp);

	new_front_agg = qfq_slot_scan(grp);

	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
		__clear_bit(grp->index, &q->bitmaps[ER]);
	else {
		u64 roundedS = qfq_round_down(new_front_agg->S,
					      grp->slot_shift);
		unsigned int s;

		if (grp->S == roundedS)
			return agg;
		grp->S = roundedS;
		grp->F = roundedS + (2ULL << grp->slot_shift);
		__clear_bit(grp->index, &q->bitmaps[ER]);
		s = qfq_calc_state(q, grp);
		__set_bit(grp->index, &q->bitmaps[s]);
	}

	qfq_unblock_groups(q, grp->index, old_F);

	return agg;
}

static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb), gso_segs;
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;
	bool first;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->agg->lmax < len)) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
			 cl->agg->lmax, len, cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
		if (err) {
			cl->qstats.drops++;
			return qdisc_drop(skb, sch, to_free);
		}
	}

	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	_bstats_update(&cl->bstats, len, gso_segs);
	sch->qstats.backlog += len;
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (!first) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < len)
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
	    q->in_serv_agg == agg)
		return err; /* non-empty or in service, nothing else to do */

	qfq_activate_agg(q, agg, enqueue);

	return err;
}
1268
1269/*
Paolo Valente462dbc92012-11-23 11:03:19 +00001270 * Schedule aggregate according to its timestamps.
Paolo Valentebe72f632012-08-07 07:27:25 +00001271 */
Paolo Valente462dbc92012-11-23 11:03:19 +00001272static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
Paolo Valentebe72f632012-08-07 07:27:25 +00001273{
Paolo Valente462dbc92012-11-23 11:03:19 +00001274 struct qfq_group *grp = agg->grp;
Paolo Valentebe72f632012-08-07 07:27:25 +00001275 u64 roundedS;
1276 int s;
1277
Paolo Valente462dbc92012-11-23 11:03:19 +00001278 roundedS = qfq_round_down(agg->S, grp->slot_shift);
stephen hemminger0545a302011-04-04 05:30:58 +00001279
1280 /*
Paolo Valente462dbc92012-11-23 11:03:19 +00001281 * Insert agg in the correct bucket.
1282 * If agg->S >= grp->S we don't need to adjust the
stephen hemminger0545a302011-04-04 05:30:58 +00001283 * bucket list and simply go to the insertion phase.
1284 * Otherwise grp->S is decreasing, we must make room
1285 * in the bucket list, and also recompute the group state.
1286 * Finally, if there were no flows in this group and nobody
1287 * was in ER make sure to adjust V.
1288 */
1289 if (grp->full_slots) {
Paolo Valente462dbc92012-11-23 11:03:19 +00001290 if (!qfq_gt(grp->S, agg->S))
stephen hemminger0545a302011-04-04 05:30:58 +00001291 goto skip_update;
1292
Paolo Valente462dbc92012-11-23 11:03:19 +00001293 /* create a slot for this agg->S */
stephen hemminger0545a302011-04-04 05:30:58 +00001294 qfq_slot_rotate(grp, roundedS);
1295 /* group was surely ineligible, remove */
1296 __clear_bit(grp->index, &q->bitmaps[IR]);
1297 __clear_bit(grp->index, &q->bitmaps[IB]);
Paolo Valente40dd2d52013-03-05 08:05:01 +00001298 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
1299 q->in_serv_agg == NULL)
stephen hemminger0545a302011-04-04 05:30:58 +00001300 q->V = roundedS;
1301
1302 grp->S = roundedS;
1303 grp->F = roundedS + (2ULL << grp->slot_shift);
1304 s = qfq_calc_state(q, grp);
1305 __set_bit(grp->index, &q->bitmaps[s]);
1306
1307 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
1308 s, q->bitmaps[s],
Paolo Valente462dbc92012-11-23 11:03:19 +00001309 (unsigned long long) agg->S,
1310 (unsigned long long) agg->F,
stephen hemminger0545a302011-04-04 05:30:58 +00001311 (unsigned long long) q->V);
1312
1313skip_update:
Paolo Valente462dbc92012-11-23 11:03:19 +00001314 qfq_slot_insert(grp, agg, roundedS);
stephen hemminger0545a302011-04-04 05:30:58 +00001315}

/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budget */

	qfq_update_agg_ts(q, agg, reason);
	if (q->in_serv_agg == NULL) { /* no aggregate in service or scheduled */
		q->in_serv_agg = agg; /* start serving this aggregate */
		/* update V: to be in service, agg must be eligible */
		q->oldV = q->V = agg->S;
	} else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);
}
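
/*
 * Note on the V update above (illustrative numbers): if no aggregate
 * is in service and, say, q->V == 100 while the newly activated
 * aggregate got S == 130 from qfq_update_agg_ts(), then V is set to
 * 130.  An aggregate may be served only while eligible (S <= V), so
 * aligning V with agg->S is what makes it legal to start serving agg
 * right away instead of going through the group machinery.
 */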

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}
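
/*
 * Example of the circular-buffer math above (illustrative values,
 * assuming QFQ_MAX_SLOTS == 32): if the slot holding grp->S sits at
 * grp->front == 30 and the aggregate's roundedS is 3 slot lengths
 * ahead of grp->S, then offset == 3 and the aggregate lives in slot
 * i == (30 + 3) % 32 == 1, i.e. the index wraps around the slots[]
 * array.  full_slots mirrors slot occupancy bit by bit, with bit 0
 * standing for the front slot, which is why the bit cleared is
 * offset rather than i.
 */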

/*
 * Called to forcibly deschedule an aggregate.  If the aggregate is
 * not in the front bucket, or if the front bucket holds other
 * aggregates too, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
}
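
/*
 * Sketch of the mask computation used above (made-up bitmap values):
 * suppose the ER groups with a smaller index than grp are those in
 * mask == 0b00101.  Then __fls(mask) == 2, and
 *
 *	~((1UL << __fls(mask)) - 1) == ...11111100
 *
 * i.e. a mask selecting every group whose index is >= 2, the highest
 * index still set in ER below grp.  If instead no lower-index group
 * is in ER, mask becomes ~0UL and selects all groups.  In both cases
 * qfq_move_groups() flips exactly the selected groups from the
 * blocked states to the corresponding ready states (EB -> ER,
 * IB -> IR), since the group just emptied can no longer hold them
 * back.
 */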

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	qfq_deactivate_class(q, cl);
}

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
			    QFQ_MAX_AGG_CLASSES);
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1 << max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;
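
	/*
	 * Worked example (illustrative, assuming QFQ_MAX_AGG_CLASSES == 8
	 * and QFQ_MTU_SHIFT == 16): with tx_queue_len == 1000,
	 * max_classes == min(1001, 8) == 8, so max_cl_shift ==
	 * __fls(8) == 3 and q->max_agg_classes == 1 << 3 == 8.  The
	 * budget of an aggregate is then bounded by 2^(16 + 3) bytes,
	 * and min_slot_shift places the finest-grained group so that all
	 * QFQ_MAX_INDEX + 1 groups fit below that budget in the fixed
	 * point timestamp layout described at the top of this file.
	 */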

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}

static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.find		= qfq_search_class,
	.tcf_block	= qfq_tcf_block,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};
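
/*
 * Typical usage from user space (a hedged sketch; the exact iproute2
 * option names may vary between versions):
 *
 *	tc qdisc add dev eth0 root handle 1: qfq
 *	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
 *	tc class add dev eth0 parent 1: classid 1:2 qfq weight 1
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip dport 80 0xffff flowid 1:1
 *
 * Each class receives bandwidth in proportion to its weight; classes
 * configured with the same weight and maximum packet length end up in
 * the same aggregate.
 */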

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");