// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */
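
/*
 * Illustrative setup (a sketch, not a recommendation; the parameter values
 * below are placeholders).  Userspace normally configures this qdisc via
 * tc(8), with something like:
 *
 *	tc qdisc add dev eth0 root choke limit 1000 min 100 max 300 \
 *		avpkt 1000 bandwidth 10mbit ecn
 *
 * tc turns those options into the TCA_CHOKE_PARMS (struct tc_red_qopt) and
 * TCA_CHOKE_STAB attributes parsed by choke_change() below.
 */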

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)
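/*
 * Keeping limit at or below this bound means the table allocated in
 * choke_change() (roundup_pow_of_two(limit + 1) slots) never exceeds
 * 128K skb pointers.
 */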

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
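/*
 * Worked example of the mask arithmetic: with tab_mask = 7 (an 8-slot
 * table), head = 6 and tail = 2, (2 - 6) & 7 = 4, i.e. slots 6, 7, 0 and 1
 * are currently occupied by packets or holes.
 */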
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u8			keys_valid;
	struct			flow_keys_digest keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
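/*
 * The computed flow digest is cached in the per-skb qdisc cb area
 * (keys_valid), so a queued packet normally needs to be dissected only
 * once even if it is drawn repeatedly as the random comparison victim.
 */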
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Select a packet at random from the queue
 * HACK: since the queue can have holes from previous deletions, retry
 *	 several times to find a random skb; if that fails, give up and
 *	 return the head
 * Will return NULL if the queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

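/*
 * Enqueue path in outline: compute the RED average queue length; at or
 * below qth_min the packet is simply queued (subject to q->limit); above
 * it, first compare against a randomly drawn queued packet and drop both
 * on a flow match (the CHOKe step), then apply the usual RED mark/drop
 * decisions.
 */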
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};


static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

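	/*
	 * Example of the sizing below: limit 1000 gives
	 * roundup_pow_of_two(1001) = 1024 table slots and tab_mask = 1023.
	 */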
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");