// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129:	- Bug fix with grio mode
 *		- a better single AvgQ mode with Grio (WRED)
 *		- A finer grained VQ dequeue based on a suggestion
 *		  from Ren Liu
 *		- More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO	(MAX_DPs / 2)
#define GRED_VQ_MASK	(MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* drop precedence: index of this virtual queue */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

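/* Returns 1 if any two virtual queues share the same priority, in which
 * case the qdisc can operate in WRED mode (a single average queue
 * estimate shared across the virtual queues).
 */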
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't need to run very often. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

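/* In WRED mode the backlog of the whole qdisc feeds the average queue
 * calculation; otherwise each virtual queue uses its own backlog.
 */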
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-VQ flags can only have been set while the global
	 * flags were 0.
	 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

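/* Enqueue: map the packet to a virtual queue via skb->tc_index, update the
 * RED average (shared with the other VQs in WRED mode), then queue, ECN-mark
 * or drop the packet according to the RED action.
 */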
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

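/* Dequeue from the shared queue head and charge the packet back to its
 * virtual queue; a RED idle period starts once the relevant backlog drains.
 */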
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

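/* Push the current configuration (or a destroy notification) down to the
 * device via ndo_setup_tc when the netdev supports GRED offload.
 */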
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command = command,
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

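/* Pull per-virtual-queue counters from the offloading driver and fold them
 * into the software statistics before a dump.
 */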
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if driver returns failure adjust the stats - in case offload
	 * ended but driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

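/* Table-level (TCA_GRED_DPS) configuration: set the number of virtual
 * queues, the default DP and the global RED flags, and destroy any VQs
 * that are now shadowed by a smaller DP count.
 */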
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

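/* Create or update a single virtual queue from a tc_gred_qopt. */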
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

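/* Two configuration styles are accepted: table-level only (TCA_GRED_DPS,
 * optionally TCA_GRED_LIMIT), or a single virtual queue described by
 * TCA_GRED_PARMS + TCA_GRED_STAB, optionally with TCA_GRED_VQ_LIST flags.
 */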
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

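/* At init time only table-level parameters may be supplied; individual
 * virtual queues are configured later through gred_change().
 */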
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

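/* Dump both the legacy all-in-one TCA_GRED_PARMS array and the newer,
 * structured TCA_GRED_VQ_LIST view of the virtual queues.
 */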
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			 * This is how we indicate to tc that there is no VQ
			 * at this DP
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");