// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *                     - a better single AvgQ mode with Grio (WRED)
 *                     - A finer grained VQ dequeue based on a suggestion
 *                       from Ren Liu
 *                     - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO	(MAX_DPs / 2)
#define GRED_VQ_MASK	(MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length */
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

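/* WRED mode is enabled whenever two or more virtual queues end up
 * sharing the same priority: gred_wred_mode_check() reports such a
 * collision, and the configuration paths below use its result to
 * switch the shared average-queue (WRED) behaviour on or off.
 */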
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it should not need to run frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

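/* In WRED mode all virtual queues share one average queue length
 * estimate. The two helpers below shuttle that shared state between
 * the per-VQ RED variables and the Qdisc-wide wred_set so that the
 * RED computations always operate on the common value.
 */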
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless global are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

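/* Enqueue path: map the skb to a virtual queue via skb->tc_index
 * (falling back to the default DP), update the RED average queue
 * estimate, then queue, ECN-mark or drop according to red_action().
 */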
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

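/* Mirror the current software state to the device, if it is capable.
 * Offload is best effort here: when the device lacks ndo_setup_tc or
 * tc_can_offload() fails, the qdisc simply keeps running in software.
 */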
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns a failure, adjust the stats - in case
	 * offload ended but the driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

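/* Apply a TCA_GRED_DPS (struct tc_gred_sopt) attribute: the number of
 * virtual queues, the default DP and the global RED flags. Used both
 * at init time and for later table-level reconfiguration.
 */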
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/* Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, setting default to %d\n",
			       ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len *
			     psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

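/* The dump emits the virtual queues twice: once as the legacy
 * all-in-one array of struct tc_gred_qopt, and once as the structured
 * TCA_GRED_VQ_LIST nest for newer userspace.
 */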
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");