/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

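/* Lifecycle sketch (TCA_FOO_* are hypothetical TLV types; real classifiers
 * pass their own, e.g. TCA_BASIC_ACT/TCA_BASIC_POLICE):
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	...
 *	tcf_exts_destroy(&f->exts);		(on filter teardown)
 *
 * Classifiers generally also call tcf_exts_destroy() on their error paths,
 * to free the actions array allocated here.
 */
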
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

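/* Usage note: classifiers that free filters asynchronously (e.g. via
 * tcf_queue_work()) generally call tcf_exts_get_net() first and
 * tcf_exts_put_net() from the deferred destroy; when tcf_exts_get_net()
 * fails, they fall back to destroying the filter synchronously.
 */
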
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

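/* Example (sketch) of iterating a filter's attached actions:
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts)
 *		pr_debug("action %d kind %s\n", i, act->ops->kind);
 *
 * With CONFIG_NET_CLS_ACT disabled the loop body is never executed.
 */
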
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

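/* Decide whether evaluation may stop at @em given its @result: the last
 * match always ends the walk, a failed match short-circuits an AND
 * relation and a successful match short-circuits an OR relation.
 */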
static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

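/* Check that @len bytes starting at @ptr lie within the skb's linear
 * data; the (ptr <= ptr + len) term guards against pointer overflow for
 * huge @len values.
 */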
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

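/* Qevent usage sketch, following the pattern wired up by sch_red
 * (TCA_FOO_QE_BLOCK stands in for the qdisc's own block-index attribute):
 *
 *	err = tcf_qevent_init(&q->qe, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_FOO_QE_BLOCK], extack);
 *	...
 *	skb = tcf_qevent_handle(&q->qe, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;	(packet was consumed by the block)
 */
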
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif