Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Jiri Pirkofbff9492015-05-12 14:56:15 +02002#include <linux/kernel.h>
Eric Dumazet0744dd02011-11-28 05:22:18 +00003#include <linux/skbuff.h>
Jesper Dangaard Brouerc452ed72012-01-24 16:03:33 -05004#include <linux/export.h>
Eric Dumazet0744dd02011-11-28 05:22:18 +00005#include <linux/ip.h>
6#include <linux/ipv6.h>
7#include <linux/if_vlan.h>
John Crispin43e66522017-08-09 14:41:19 +02008#include <net/dsa.h>
Simon Hormana38402b2017-10-02 10:41:16 +02009#include <net/dst_metadata.h>
Eric Dumazet0744dd02011-11-28 05:22:18 +000010#include <net/ip.h>
Eric Dumazetddbe5032012-07-18 08:11:12 +000011#include <net/ipv6.h>
Gao Fengab10dcc2016-08-09 12:38:24 +080012#include <net/gre.h>
13#include <net/pptp.h>
Jon Maloy8d6e79d2017-11-08 09:59:26 +010014#include <net/tipc.h>
Daniel Borkmannf77668d2013-03-19 06:39:30 +000015#include <linux/igmp.h>
16#include <linux/icmp.h>
17#include <linux/sctp.h>
18#include <linux/dccp.h>
Eric Dumazet0744dd02011-11-28 05:22:18 +000019#include <linux/if_tunnel.h>
20#include <linux/if_pppox.h>
21#include <linux/ppp_defs.h>
Jiri Pirko06635a32015-05-12 14:56:16 +020022#include <linux/stddef.h>
Jiri Pirko67a900c2015-05-12 14:56:19 +020023#include <linux/if_ether.h>
Tom Herbertb3baa0f2015-06-04 09:16:46 -070024#include <linux/mpls.h>
Jiri Pirkoac4bb5d2017-05-23 18:40:44 +020025#include <linux/tcp.h>
Jiri Pirko1bd758e2015-05-12 14:56:07 +020026#include <net/flow_dissector.h>
Alexander Duyck56193d12014-09-05 19:20:26 -040027#include <scsi/fc/fc_fcoe.h>
Sven Eckelmann5b0890a2017-12-21 10:17:42 +010028#include <uapi/linux/batadv_packet.h>
Petar Penkovd58e4682018-09-14 07:46:18 -070029#include <linux/bpf.h>
Paul Blakey75a56752019-07-09 10:30:49 +030030#if IS_ENABLED(CONFIG_NF_CONNTRACK)
31#include <net/netfilter/nf_conntrack_core.h>
32#include <net/netfilter/nf_conntrack_labels.h>
33#endif
Jakub Sitnickia3fd7ce2020-05-31 10:28:36 +020034#include <linux/bpf-netns.h>
Petar Penkovd58e4682018-09-14 07:46:18 -070035
David S. Miller20a17bf2015-09-01 21:19:17 -070036static void dissector_set_key(struct flow_dissector *flow_dissector,
37 enum flow_dissector_key_id key_id)
Jiri Pirkofbff9492015-05-12 14:56:15 +020038{
39 flow_dissector->used_keys |= (1 << key_id);
40}
41
Jiri Pirkofbff9492015-05-12 14:56:15 +020042void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
43 const struct flow_dissector_key *key,
44 unsigned int key_count)
45{
46 unsigned int i;
47
48 memset(flow_dissector, 0, sizeof(*flow_dissector));
49
50 for (i = 0; i < key_count; i++, key++) {
51 /* User should make sure that every key target offset is withing
52 * boundaries of unsigned short.
53 */
54 BUG_ON(key->offset > USHRT_MAX);
David S. Miller20a17bf2015-09-01 21:19:17 -070055 BUG_ON(dissector_uses_key(flow_dissector,
56 key->key_id));
Jiri Pirkofbff9492015-05-12 14:56:15 +020057
David S. Miller20a17bf2015-09-01 21:19:17 -070058 dissector_set_key(flow_dissector, key->key_id);
Jiri Pirkofbff9492015-05-12 14:56:15 +020059 flow_dissector->offset[key->key_id] = key->offset;
60 }
61
Tom Herbert42aecaa2015-06-04 09:16:39 -070062	/* Ensure that the dissector always includes the control and basic keys.
63	 * That way we can avoid handling their absence in the fast path.
Jiri Pirkofbff9492015-05-12 14:56:15 +020064 */
David S. Miller20a17bf2015-09-01 21:19:17 -070065 BUG_ON(!dissector_uses_key(flow_dissector,
66 FLOW_DISSECTOR_KEY_CONTROL));
67 BUG_ON(!dissector_uses_key(flow_dissector,
68 FLOW_DISSECTOR_KEY_BASIC));
Jiri Pirkofbff9492015-05-12 14:56:15 +020069}
70EXPORT_SYMBOL(skb_flow_dissector_init);
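
/*
 * Illustrative sketch (the "example_*" names are hypothetical): a user of
 * this API describes where each dissected key lives inside its private
 * container struct and hands that table to skb_flow_dissector_init().
 * The table must contain at least the CONTROL and BASIC keys, see the
 * BUG_ON()s above.
 */
struct example_dissect_keys {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
};

static const struct flow_dissector_key example_dissect_key_table[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct example_dissect_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct example_dissect_keys, basic),
	},
};

static void __maybe_unused example_dissector_setup(struct flow_dissector *d)
{
	skb_flow_dissector_init(d, example_dissect_key_table,
				ARRAY_SIZE(example_dissect_key_table));
}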
71
Jakub Sitnickib27f7bb2020-05-31 10:28:37 +020072#ifdef CONFIG_BPF_SYSCALL
Jakub Sitnicki3b701692020-06-25 16:13:54 +020073int flow_dissector_bpf_prog_attach_check(struct net *net,
74 struct bpf_prog *prog)
Petar Penkovd58e4682018-09-14 07:46:18 -070075{
Jakub Sitnickia3fd7ce2020-05-31 10:28:36 +020076 enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
Stanislav Fomicheva11c3972019-10-07 09:21:02 -070077
78 if (net == &init_net) {
79 /* BPF flow dissector in the root namespace overrides
80 * any per-net-namespace one. When attaching to root,
81 * make sure we don't have any BPF program attached
82 * to the non-root namespaces.
83 */
84 struct net *ns;
85
86 for_each_net(ns) {
Jakub Sitnicki719b78a52019-10-11 10:29:45 +020087 if (ns == &init_net)
88 continue;
Jakub Sitnicki695c1212020-06-25 16:13:55 +020089 if (rcu_access_pointer(ns->bpf.run_array[type]))
Jakub Sitnicki171526f2020-05-31 10:28:35 +020090 return -EEXIST;
Stanislav Fomicheva11c3972019-10-07 09:21:02 -070091 }
92 } else {
93 /* Make sure root flow dissector is not attached
94 * when attaching to the non-root namespace.
95 */
Jakub Sitnicki695c1212020-06-25 16:13:55 +020096 if (rcu_access_pointer(init_net.bpf.run_array[type]))
Jakub Sitnicki171526f2020-05-31 10:28:35 +020097 return -EEXIST;
Stanislav Fomicheva11c3972019-10-07 09:21:02 -070098 }
99
Jakub Sitnicki171526f2020-05-31 10:28:35 +0200100 return 0;
101}
Jakub Sitnickib27f7bb2020-05-31 10:28:37 +0200102#endif /* CONFIG_BPF_SYSCALL */
Jakub Sitnicki5cf65922020-05-21 10:34:35 +0200103
Simon Horman972d3872016-12-07 13:48:27 +0100104/**
WANG Cong6451b3f2014-08-25 17:03:46 -0700105 * __skb_flow_get_ports - extract the upper layer ports and return them
106 * @skb: sk_buff to extract the ports from
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200107 * @thoff: transport header offset
108 * @ip_proto: protocol for which to get port offset
WANG Cong6451b3f2014-08-25 17:03:46 -0700109 * @data: raw buffer pointer to the packet, if NULL use skb->data
110 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200111 *
112 * The function will try to retrieve the ports at offset thoff + poff, where poff
113 * is the protocol port offset returned from proto_ports_offset().
114 */
David S. Miller690e36e2014-08-23 12:13:41 -0700115__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
116 void *data, int hlen)
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200117{
118 int poff = proto_ports_offset(ip_proto);
119
David S. Miller690e36e2014-08-23 12:13:41 -0700120 if (!data) {
121 data = skb->data;
122 hlen = skb_headlen(skb);
123 }
124
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200125 if (poff >= 0) {
126 __be32 *ports, _ports;
127
David S. Miller690e36e2014-08-23 12:13:41 -0700128 ports = __skb_header_pointer(skb, thoff + poff,
129 sizeof(_ports), data, hlen, &_ports);
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200130 if (ports)
131 return *ports;
132 }
133
134 return 0;
135}
David S. Miller690e36e2014-08-23 12:13:41 -0700136EXPORT_SYMBOL(__skb_flow_get_ports);
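
/*
 * Illustrative sketch (hypothetical helper name): extract the destination
 * port once the transport header offset and IP protocol are known.  Passing
 * NULL/0 for @data and @hlen makes the helper fall back to skb->data and
 * skb_headlen(skb).
 */
static __be16 __maybe_unused example_flow_get_dport(const struct sk_buff *skb,
						    int thoff, u8 ip_proto)
{
	struct flow_dissector_key_ports ports;

	/* The returned word carries the source port in its first 16 bits and
	 * the destination port in its second 16 bits, still in network order;
	 * the flow_dissector_key_ports union splits them apart.
	 */
	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);

	return ports.dst;
}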
Nikolay Aleksandrov357afe92013-10-02 13:39:24 +0200137
Matteo Croce5dec5972019-10-29 14:50:52 +0100138static bool icmp_has_id(u8 type)
139{
140 switch (type) {
141 case ICMP_ECHO:
142 case ICMP_ECHOREPLY:
143 case ICMP_TIMESTAMP:
144 case ICMP_TIMESTAMPREPLY:
145 case ICMPV6_ECHO_REQUEST:
146 case ICMPV6_ECHO_REPLY:
147 return true;
148 }
149
150 return false;
151}
152
153/**
154 * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
155 * @skb: sk_buff to extract from
156 * @key_icmp: struct flow_dissector_key_icmp to fill
157 * @data: raw buffer pointer to the packet
Li RongQing6b3acfc2020-01-09 08:59:56 +0800158 * @thoff: offset to extract at
Matteo Croce5dec5972019-10-29 14:50:52 +0100159 * @hlen: packet header length
160 */
161void skb_flow_get_icmp_tci(const struct sk_buff *skb,
162 struct flow_dissector_key_icmp *key_icmp,
163 void *data, int thoff, int hlen)
164{
165 struct icmphdr *ih, _ih;
166
167 ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
168 if (!ih)
169 return;
170
171 key_icmp->type = ih->type;
172 key_icmp->code = ih->code;
173
174 /* As we use 0 to signal that the Id field is not present,
175	 * avoid confusion with packets that lack such a field.
176 */
177 if (icmp_has_id(ih->type))
178 key_icmp->id = ih->un.echo.id ? : 1;
179 else
180 key_icmp->id = 0;
181}
182EXPORT_SYMBOL(skb_flow_get_icmp_tci);
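
/*
 * Illustrative sketch (hypothetical helper name): a caller that only cares
 * whether an ICMP packet is an echo request/reply can reuse the helper
 * directly on the linear header.
 */
static bool __maybe_unused example_flow_icmp_is_echo(const struct sk_buff *skb,
						     int thoff)
{
	struct flow_dissector_key_icmp key = {};

	skb_flow_get_icmp_tci(skb, &key, skb->data, thoff, skb_headlen(skb));

	return key.type == ICMP_ECHO || key.type == ICMPV6_ECHO_REQUEST;
}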
183
184/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
185 * using skb_flow_get_icmp_tci().
Matteo Croce3b336d6f2019-10-29 14:50:51 +0100186 */
187static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
188 struct flow_dissector *flow_dissector,
189 void *target_container,
190 void *data, int thoff, int hlen)
191{
192 struct flow_dissector_key_icmp *key_icmp;
193
194 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
195 return;
196
197 key_icmp = skb_flow_dissector_target(flow_dissector,
198 FLOW_DISSECTOR_KEY_ICMP,
199 target_container);
Matteo Croce5dec5972019-10-29 14:50:52 +0100200
201 skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
Matteo Croce3b336d6f2019-10-29 14:50:51 +0100202}
203
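/* Fill FLOW_DISSECTOR_KEY_META.  The META key currently carries only the
 * ingress ifindex (skb->skb_iif); classifiers that match on the incoming
 * device (e.g. flower's indev match) are the expected consumers.
 */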
Jiri Pirko82828b82019-06-19 09:41:02 +0300204void skb_flow_dissect_meta(const struct sk_buff *skb,
205 struct flow_dissector *flow_dissector,
206 void *target_container)
207{
208 struct flow_dissector_key_meta *meta;
209
210 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
211 return;
212
213 meta = skb_flow_dissector_target(flow_dissector,
214 FLOW_DISSECTOR_KEY_META,
215 target_container);
216 meta->ingress_ifindex = skb->skb_iif;
217}
218EXPORT_SYMBOL(skb_flow_dissect_meta);
219
Simon Hormana38402b2017-10-02 10:41:16 +0200220static void
221skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
222 struct flow_dissector *flow_dissector,
223 void *target_container)
224{
225 struct flow_dissector_key_control *ctrl;
226
227 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
228 return;
229
230 ctrl = skb_flow_dissector_target(flow_dissector,
231 FLOW_DISSECTOR_KEY_ENC_CONTROL,
232 target_container);
233 ctrl->addr_type = type;
234}
235
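/* Fill FLOW_DISSECTOR_KEY_CT from the skb's conntrack entry.  The ctinfo_map
 * supplied by the caller is indexed by enum ip_conntrack_info and translates
 * each conntrack state into the ct_state bits the caller wants to expose
 * (e.g. the flower classifier passes such a map); states at or beyond
 * @mapsize leave ct_state at zero.
 */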
Simon Horman62b32372017-12-04 11:31:48 +0100236void
Paul Blakey75a56752019-07-09 10:30:49 +0300237skb_flow_dissect_ct(const struct sk_buff *skb,
238 struct flow_dissector *flow_dissector,
239 void *target_container,
240 u16 *ctinfo_map,
241 size_t mapsize)
242{
243#if IS_ENABLED(CONFIG_NF_CONNTRACK)
244 struct flow_dissector_key_ct *key;
245 enum ip_conntrack_info ctinfo;
246 struct nf_conn_labels *cl;
247 struct nf_conn *ct;
248
249 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
250 return;
251
252 ct = nf_ct_get(skb, &ctinfo);
253 if (!ct)
254 return;
255
256 key = skb_flow_dissector_target(flow_dissector,
257 FLOW_DISSECTOR_KEY_CT,
258 target_container);
259
260 if (ctinfo < mapsize)
261 key->ct_state = ctinfo_map[ctinfo];
262#if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
263 key->ct_zone = ct->zone.id;
264#endif
265#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
266 key->ct_mark = ct->mark;
267#endif
268
269 cl = nf_ct_labels_find(ct);
270 if (cl)
271 memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
272#endif /* CONFIG_NF_CONNTRACK */
273}
274EXPORT_SYMBOL(skb_flow_dissect_ct);
275
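/* Fill the ENC_* keys from the skb's collect-metadata dst entry
 * (skb_tunnel_info()) rather than by re-parsing the packet, so this helper
 * only yields data on devices that collect tunnel metadata on receive
 * (e.g. metadata-mode vxlan/geneve).
 */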
276void
Simon Horman62b32372017-12-04 11:31:48 +0100277skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
278 struct flow_dissector *flow_dissector,
279 void *target_container)
Simon Hormana38402b2017-10-02 10:41:16 +0200280{
281 struct ip_tunnel_info *info;
282 struct ip_tunnel_key *key;
283
284 /* A quick check to see if there might be something to do. */
285 if (!dissector_uses_key(flow_dissector,
286 FLOW_DISSECTOR_KEY_ENC_KEYID) &&
287 !dissector_uses_key(flow_dissector,
288 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
289 !dissector_uses_key(flow_dissector,
290 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
291 !dissector_uses_key(flow_dissector,
292 FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
293 !dissector_uses_key(flow_dissector,
Or Gerlitz5544adb2018-07-17 19:27:17 +0300294 FLOW_DISSECTOR_KEY_ENC_PORTS) &&
295 !dissector_uses_key(flow_dissector,
Simon Horman92e2c402018-08-07 17:36:00 +0200296 FLOW_DISSECTOR_KEY_ENC_IP) &&
297 !dissector_uses_key(flow_dissector,
298 FLOW_DISSECTOR_KEY_ENC_OPTS))
Simon Hormana38402b2017-10-02 10:41:16 +0200299 return;
300
301 info = skb_tunnel_info(skb);
302 if (!info)
303 return;
304
305 key = &info->key;
306
307 switch (ip_tunnel_info_af(info)) {
308 case AF_INET:
309 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
310 flow_dissector,
311 target_container);
312 if (dissector_uses_key(flow_dissector,
313 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
314 struct flow_dissector_key_ipv4_addrs *ipv4;
315
316 ipv4 = skb_flow_dissector_target(flow_dissector,
317 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
318 target_container);
319 ipv4->src = key->u.ipv4.src;
320 ipv4->dst = key->u.ipv4.dst;
321 }
322 break;
323 case AF_INET6:
324 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
325 flow_dissector,
326 target_container);
327 if (dissector_uses_key(flow_dissector,
328 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
329 struct flow_dissector_key_ipv6_addrs *ipv6;
330
331 ipv6 = skb_flow_dissector_target(flow_dissector,
332 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
333 target_container);
334 ipv6->src = key->u.ipv6.src;
335 ipv6->dst = key->u.ipv6.dst;
336 }
337 break;
338 }
339
340 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
341 struct flow_dissector_key_keyid *keyid;
342
343 keyid = skb_flow_dissector_target(flow_dissector,
344 FLOW_DISSECTOR_KEY_ENC_KEYID,
345 target_container);
346 keyid->keyid = tunnel_id_to_key32(key->tun_id);
347 }
348
349 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
350 struct flow_dissector_key_ports *tp;
351
352 tp = skb_flow_dissector_target(flow_dissector,
353 FLOW_DISSECTOR_KEY_ENC_PORTS,
354 target_container);
355 tp->src = key->tp_src;
356 tp->dst = key->tp_dst;
357 }
Or Gerlitz5544adb2018-07-17 19:27:17 +0300358
359 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
360 struct flow_dissector_key_ip *ip;
361
362 ip = skb_flow_dissector_target(flow_dissector,
363 FLOW_DISSECTOR_KEY_ENC_IP,
364 target_container);
365 ip->tos = key->tos;
366 ip->ttl = key->ttl;
367 }
Simon Horman92e2c402018-08-07 17:36:00 +0200368
369 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
370 struct flow_dissector_key_enc_opts *enc_opt;
371
372 enc_opt = skb_flow_dissector_target(flow_dissector,
373 FLOW_DISSECTOR_KEY_ENC_OPTS,
374 target_container);
375
376 if (info->options_len) {
377 enc_opt->len = info->options_len;
378 ip_tunnel_info_opts_get(enc_opt->data, info);
379 enc_opt->dst_opt_type = info->key.tun_flags &
380 TUNNEL_OPTIONS_PRESENT;
381 }
382 }
Simon Hormana38402b2017-10-02 10:41:16 +0200383}
Simon Horman62b32372017-12-04 11:31:48 +0100384EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
Simon Hormana38402b2017-10-02 10:41:16 +0200385
Ariel Levkovich0cb09af2020-07-23 01:03:00 +0300386void skb_flow_dissect_hash(const struct sk_buff *skb,
387 struct flow_dissector *flow_dissector,
388 void *target_container)
389{
390 struct flow_dissector_key_hash *key;
391
392 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
393 return;
394
395 key = skb_flow_dissector_target(flow_dissector,
396 FLOW_DISSECTOR_KEY_HASH,
397 target_container);
398
399 key->hash = skb_get_hash_raw(skb);
400}
401EXPORT_SYMBOL(skb_flow_dissect_hash);
402
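/* Dissect one MPLS label stack entry.  The caller bumps nhoff and the LSE
 * index and re-enters until the bottom-of-stack bit is seen.  A label equal
 * to MPLS_LABEL_ENTROPY marks the following LSE as the entropy label, whose
 * value is exported via FLOW_DISSECTOR_KEY_MPLS_ENTROPY.
 */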
Jiri Pirko9bf881f2017-03-06 16:39:51 +0100403static enum flow_dissect_ret
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100404__skb_flow_dissect_mpls(const struct sk_buff *skb,
405 struct flow_dissector *flow_dissector,
Guillaume Nault58cff782020-05-26 14:29:00 +0200406 void *target_container, void *data, int nhoff, int hlen,
407 int lse_index, bool *entropy_label)
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100408{
Guillaume Nault58cff782020-05-26 14:29:00 +0200409 struct mpls_label *hdr, _hdr;
410 u32 entry, label, bos;
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100411
412 if (!dissector_uses_key(flow_dissector,
Benjamin LaHaise029c1ec2017-04-22 16:52:46 -0400413 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
414 !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100415 return FLOW_DISSECT_RET_OUT_GOOD;
416
Guillaume Nault58cff782020-05-26 14:29:00 +0200417 if (lse_index >= FLOW_DIS_MPLS_MAX)
418 return FLOW_DISSECT_RET_OUT_GOOD;
419
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100420 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
421 hlen, &_hdr);
422 if (!hdr)
423 return FLOW_DISSECT_RET_OUT_BAD;
424
Guillaume Nault58cff782020-05-26 14:29:00 +0200425 entry = ntohl(hdr->entry);
Benjamin LaHaise029c1ec2017-04-22 16:52:46 -0400426 label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
Guillaume Nault58cff782020-05-26 14:29:00 +0200427 bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
Benjamin LaHaise029c1ec2017-04-22 16:52:46 -0400428
429 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
430 struct flow_dissector_key_mpls *key_mpls;
Guillaume Nault58cff782020-05-26 14:29:00 +0200431 struct flow_dissector_mpls_lse *lse;
Benjamin LaHaise029c1ec2017-04-22 16:52:46 -0400432
433 key_mpls = skb_flow_dissector_target(flow_dissector,
434 FLOW_DISSECTOR_KEY_MPLS,
435 target_container);
Guillaume Nault58cff782020-05-26 14:29:00 +0200436 lse = &key_mpls->ls[lse_index];
437
438 lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
439 lse->mpls_bos = bos;
440 lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
441 lse->mpls_label = label;
442 dissector_set_mpls_lse(key_mpls, lse_index);
Benjamin LaHaise029c1ec2017-04-22 16:52:46 -0400443 }
444
Guillaume Nault58cff782020-05-26 14:29:00 +0200445 if (*entropy_label &&
446 dissector_uses_key(flow_dissector,
447 FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
448 struct flow_dissector_key_keyid *key_keyid;
449
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100450 key_keyid = skb_flow_dissector_target(flow_dissector,
451 FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
452 target_container);
Guillaume Nault58cff782020-05-26 14:29:00 +0200453 key_keyid->keyid = cpu_to_be32(label);
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100454 }
Guillaume Nault58cff782020-05-26 14:29:00 +0200455
456 *entropy_label = label == MPLS_LABEL_ENTROPY;
457
458 return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +0100459}
460
461static enum flow_dissect_ret
Jiri Pirko9bf881f2017-03-06 16:39:51 +0100462__skb_flow_dissect_arp(const struct sk_buff *skb,
463 struct flow_dissector *flow_dissector,
464 void *target_container, void *data, int nhoff, int hlen)
465{
466 struct flow_dissector_key_arp *key_arp;
467 struct {
468 unsigned char ar_sha[ETH_ALEN];
469 unsigned char ar_sip[4];
470 unsigned char ar_tha[ETH_ALEN];
471 unsigned char ar_tip[4];
472 } *arp_eth, _arp_eth;
473 const struct arphdr *arp;
David S. Miller6f14f442017-04-06 07:25:07 -0700474 struct arphdr _arp;
Jiri Pirko9bf881f2017-03-06 16:39:51 +0100475
476 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
477 return FLOW_DISSECT_RET_OUT_GOOD;
478
479 arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
480 hlen, &_arp);
481 if (!arp)
482 return FLOW_DISSECT_RET_OUT_BAD;
483
484 if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
485 arp->ar_pro != htons(ETH_P_IP) ||
486 arp->ar_hln != ETH_ALEN ||
487 arp->ar_pln != 4 ||
488 (arp->ar_op != htons(ARPOP_REPLY) &&
489 arp->ar_op != htons(ARPOP_REQUEST)))
490 return FLOW_DISSECT_RET_OUT_BAD;
491
492 arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
493 sizeof(_arp_eth), data,
494 hlen, &_arp_eth);
495 if (!arp_eth)
496 return FLOW_DISSECT_RET_OUT_BAD;
497
498 key_arp = skb_flow_dissector_target(flow_dissector,
499 FLOW_DISSECTOR_KEY_ARP,
500 target_container);
501
502 memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
503 memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
504
505 /* Only store the lower byte of the opcode;
506 * this covers ARPOP_REPLY and ARPOP_REQUEST.
507 */
508 key_arp->op = ntohs(arp->ar_op) & 0xff;
509
510 ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
511 ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
512
513 return FLOW_DISSECT_RET_OUT_GOOD;
514}
515
Jiri Pirko7c92de82017-03-06 16:39:55 +0100516static enum flow_dissect_ret
517__skb_flow_dissect_gre(const struct sk_buff *skb,
518 struct flow_dissector_key_control *key_control,
519 struct flow_dissector *flow_dissector,
520 void *target_container, void *data,
521 __be16 *p_proto, int *p_nhoff, int *p_hlen,
522 unsigned int flags)
523{
524 struct flow_dissector_key_keyid *key_keyid;
525 struct gre_base_hdr *hdr, _hdr;
526 int offset = 0;
527 u16 gre_ver;
528
529 hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
530 data, *p_hlen, &_hdr);
531 if (!hdr)
532 return FLOW_DISSECT_RET_OUT_BAD;
533
534 /* Only look inside GRE without routing */
535 if (hdr->flags & GRE_ROUTING)
536 return FLOW_DISSECT_RET_OUT_GOOD;
537
538	/* Only look inside GRE for versions 0 and 1 */
539 gre_ver = ntohs(hdr->flags & GRE_VERSION);
540 if (gre_ver > 1)
541 return FLOW_DISSECT_RET_OUT_GOOD;
542
543 *p_proto = hdr->protocol;
544 if (gre_ver) {
545		/* Version 1 must be PPTP, and check the flags */
546 if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
547 return FLOW_DISSECT_RET_OUT_GOOD;
548 }
549
550 offset += sizeof(struct gre_base_hdr);
551
552 if (hdr->flags & GRE_CSUM)
Pankaj Bharadiyac5936422019-12-09 10:31:43 -0800553 offset += sizeof_field(struct gre_full_hdr, csum) +
554 sizeof_field(struct gre_full_hdr, reserved1);
Jiri Pirko7c92de82017-03-06 16:39:55 +0100555
556 if (hdr->flags & GRE_KEY) {
557 const __be32 *keyid;
558 __be32 _keyid;
559
560 keyid = __skb_header_pointer(skb, *p_nhoff + offset,
561 sizeof(_keyid),
562 data, *p_hlen, &_keyid);
563 if (!keyid)
564 return FLOW_DISSECT_RET_OUT_BAD;
565
566 if (dissector_uses_key(flow_dissector,
567 FLOW_DISSECTOR_KEY_GRE_KEYID)) {
568 key_keyid = skb_flow_dissector_target(flow_dissector,
569 FLOW_DISSECTOR_KEY_GRE_KEYID,
570 target_container);
571 if (gre_ver == 0)
572 key_keyid->keyid = *keyid;
573 else
574 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
575 }
Pankaj Bharadiyac5936422019-12-09 10:31:43 -0800576 offset += sizeof_field(struct gre_full_hdr, key);
Jiri Pirko7c92de82017-03-06 16:39:55 +0100577 }
578
579 if (hdr->flags & GRE_SEQ)
Pankaj Bharadiyac5936422019-12-09 10:31:43 -0800580 offset += sizeof_field(struct pptp_gre_header, seq);
Jiri Pirko7c92de82017-03-06 16:39:55 +0100581
582 if (gre_ver == 0) {
583 if (*p_proto == htons(ETH_P_TEB)) {
584 const struct ethhdr *eth;
585 struct ethhdr _eth;
586
587 eth = __skb_header_pointer(skb, *p_nhoff + offset,
588 sizeof(_eth),
589 data, *p_hlen, &_eth);
590 if (!eth)
591 return FLOW_DISSECT_RET_OUT_BAD;
592 *p_proto = eth->h_proto;
593 offset += sizeof(*eth);
594
595 /* Cap headers that we access via pointers at the
596 * end of the Ethernet header as our maximum alignment
597 * at that point is only 2 bytes.
598 */
599 if (NET_IP_ALIGN)
600 *p_hlen = *p_nhoff + offset;
601 }
602 } else { /* version 1, must be PPTP */
603 u8 _ppp_hdr[PPP_HDRLEN];
604 u8 *ppp_hdr;
605
606 if (hdr->flags & GRE_ACK)
Pankaj Bharadiyac5936422019-12-09 10:31:43 -0800607 offset += sizeof_field(struct pptp_gre_header, ack);
Jiri Pirko7c92de82017-03-06 16:39:55 +0100608
609 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
610 sizeof(_ppp_hdr),
611 data, *p_hlen, _ppp_hdr);
612 if (!ppp_hdr)
613 return FLOW_DISSECT_RET_OUT_BAD;
614
615 switch (PPP_PROTOCOL(ppp_hdr)) {
616 case PPP_IP:
617 *p_proto = htons(ETH_P_IP);
618 break;
619 case PPP_IPV6:
620 *p_proto = htons(ETH_P_IPV6);
621 break;
622 default:
623 /* Could probably catch some more like MPLS */
624 break;
625 }
626
627 offset += PPP_HDRLEN;
628 }
629
630 *p_nhoff += offset;
631 key_control->flags |= FLOW_DIS_ENCAPSULATION;
632 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
633 return FLOW_DISSECT_RET_OUT_GOOD;
634
Tom Herbert3a1214e2017-09-01 14:04:11 -0700635 return FLOW_DISSECT_RET_PROTO_AGAIN;
Jiri Pirko7c92de82017-03-06 16:39:55 +0100636}
637
Sven Eckelmann5b0890a2017-12-21 10:17:42 +0100638/**
639 * __skb_flow_dissect_batadv() - dissect batman-adv header
640 * @skb: sk_buff with the batman-adv header
641 * @key_control: flow dissector's control key
642 * @data: raw buffer pointer to the packet, if NULL use skb->data
643 * @p_proto: pointer used to update the protocol to process next
644 * @p_nhoff: pointer used to update inner network header offset
645 * @hlen: packet header length
646 * @flags: any combination of FLOW_DISSECTOR_F_*
647 *
648 * An attempt is made to dissect ETH_P_BATMAN packets. Only
649 * &struct batadv_unicast packets are actually processed because they contain an
650 * inner Ethernet header and are usually followed by the actual network header. This
651 * allows the flow dissector to continue processing the packet.
652 *
653 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
654 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
655 * otherwise FLOW_DISSECT_RET_OUT_BAD
656 */
657static enum flow_dissect_ret
658__skb_flow_dissect_batadv(const struct sk_buff *skb,
659 struct flow_dissector_key_control *key_control,
660 void *data, __be16 *p_proto, int *p_nhoff, int hlen,
661 unsigned int flags)
662{
663 struct {
664 struct batadv_unicast_packet batadv_unicast;
665 struct ethhdr eth;
666 } *hdr, _hdr;
667
668 hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
669 &_hdr);
670 if (!hdr)
671 return FLOW_DISSECT_RET_OUT_BAD;
672
673 if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
674 return FLOW_DISSECT_RET_OUT_BAD;
675
676 if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
677 return FLOW_DISSECT_RET_OUT_BAD;
678
679 *p_proto = hdr->eth.h_proto;
680 *p_nhoff += sizeof(*hdr);
681
682 key_control->flags |= FLOW_DIS_ENCAPSULATION;
683 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
684 return FLOW_DISSECT_RET_OUT_GOOD;
685
686 return FLOW_DISSECT_RET_PROTO_AGAIN;
687}
688
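/* If FLOW_DISSECTOR_KEY_TCP is set, record the TCP flags: the first 16 bits
 * of the TCP flag word with the data-offset nibble masked off, i.e. all nine
 * TCP flags plus the three reserved bits, kept in network byte order.
 */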
Jiri Pirkoac4bb5d2017-05-23 18:40:44 +0200689static void
690__skb_flow_dissect_tcp(const struct sk_buff *skb,
691 struct flow_dissector *flow_dissector,
692 void *target_container, void *data, int thoff, int hlen)
693{
694 struct flow_dissector_key_tcp *key_tcp;
695 struct tcphdr *th, _th;
696
697 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
698 return;
699
700 th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
701 if (!th)
702 return;
703
704 if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
705 return;
706
707 key_tcp = skb_flow_dissector_target(flow_dissector,
708 FLOW_DISSECTOR_KEY_TCP,
709 target_container);
710 key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
711}
712
Or Gerlitz518d8a22017-06-01 21:37:37 +0300713static void
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +0900714__skb_flow_dissect_ports(const struct sk_buff *skb,
715 struct flow_dissector *flow_dissector,
716 void *target_container, void *data, int nhoff,
717 u8 ip_proto, int hlen)
718{
719 enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
720 struct flow_dissector_key_ports *key_ports;
721
722 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
723 dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
724 else if (dissector_uses_key(flow_dissector,
725 FLOW_DISSECTOR_KEY_PORTS_RANGE))
726 dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
727
728 if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
729 return;
730
731 key_ports = skb_flow_dissector_target(flow_dissector,
732 dissector_ports,
733 target_container);
734 key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
735 data, hlen);
736}
737
738static void
Or Gerlitz518d8a22017-06-01 21:37:37 +0300739__skb_flow_dissect_ipv4(const struct sk_buff *skb,
740 struct flow_dissector *flow_dissector,
741 void *target_container, void *data, const struct iphdr *iph)
742{
743 struct flow_dissector_key_ip *key_ip;
744
745 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
746 return;
747
748 key_ip = skb_flow_dissector_target(flow_dissector,
749 FLOW_DISSECTOR_KEY_IP,
750 target_container);
751 key_ip->tos = iph->tos;
752 key_ip->ttl = iph->ttl;
753}
754
755static void
756__skb_flow_dissect_ipv6(const struct sk_buff *skb,
757 struct flow_dissector *flow_dissector,
758 void *target_container, void *data, const struct ipv6hdr *iph)
759{
760 struct flow_dissector_key_ip *key_ip;
761
762 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
763 return;
764
765 key_ip = skb_flow_dissector_target(flow_dissector,
766 FLOW_DISSECTOR_KEY_IP,
767 target_container);
768 key_ip->tos = ipv6_get_dsfield(iph);
769 key_ip->ttl = iph->hop_limit;
770}
771
Tom Herbert1eed4df2017-09-01 14:04:12 -0700772/* Maximum number of protocol headers that can be parsed in
773 * __skb_flow_dissect
774 */
775#define MAX_FLOW_DISSECT_HDRS 15
776
777static bool skb_flow_dissect_allowed(int *num_hdrs)
778{
779 ++*num_hdrs;
780
781 return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
782}
783
Petar Penkovd58e4682018-09-14 07:46:18 -0700784static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
785 struct flow_dissector *flow_dissector,
786 void *target_container)
787{
Yoshiki Komachi59fb9b62020-01-17 16:05:32 +0900788 struct flow_dissector_key_ports *key_ports = NULL;
Petar Penkovd58e4682018-09-14 07:46:18 -0700789 struct flow_dissector_key_control *key_control;
790 struct flow_dissector_key_basic *key_basic;
791 struct flow_dissector_key_addrs *key_addrs;
Stanislav Fomichev71c99e32019-07-25 15:52:30 -0700792 struct flow_dissector_key_tags *key_tags;
Petar Penkovd58e4682018-09-14 07:46:18 -0700793
794 key_control = skb_flow_dissector_target(flow_dissector,
795 FLOW_DISSECTOR_KEY_CONTROL,
796 target_container);
797 key_control->thoff = flow_keys->thoff;
798 if (flow_keys->is_frag)
799 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
800 if (flow_keys->is_first_frag)
801 key_control->flags |= FLOW_DIS_FIRST_FRAG;
802 if (flow_keys->is_encap)
803 key_control->flags |= FLOW_DIS_ENCAPSULATION;
804
805 key_basic = skb_flow_dissector_target(flow_dissector,
806 FLOW_DISSECTOR_KEY_BASIC,
807 target_container);
808 key_basic->n_proto = flow_keys->n_proto;
809 key_basic->ip_proto = flow_keys->ip_proto;
810
811 if (flow_keys->addr_proto == ETH_P_IP &&
812 dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
813 key_addrs = skb_flow_dissector_target(flow_dissector,
814 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
815 target_container);
816 key_addrs->v4addrs.src = flow_keys->ipv4_src;
817 key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
818 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
819 } else if (flow_keys->addr_proto == ETH_P_IPV6 &&
820 dissector_uses_key(flow_dissector,
821 FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
822 key_addrs = skb_flow_dissector_target(flow_dissector,
823 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
824 target_container);
825 memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
826 sizeof(key_addrs->v6addrs));
827 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
828 }
829
Yoshiki Komachi59fb9b62020-01-17 16:05:32 +0900830 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
Petar Penkovd58e4682018-09-14 07:46:18 -0700831 key_ports = skb_flow_dissector_target(flow_dissector,
832 FLOW_DISSECTOR_KEY_PORTS,
833 target_container);
Yoshiki Komachi59fb9b62020-01-17 16:05:32 +0900834 else if (dissector_uses_key(flow_dissector,
835 FLOW_DISSECTOR_KEY_PORTS_RANGE))
836 key_ports = skb_flow_dissector_target(flow_dissector,
837 FLOW_DISSECTOR_KEY_PORTS_RANGE,
838 target_container);
839
840 if (key_ports) {
Petar Penkovd58e4682018-09-14 07:46:18 -0700841 key_ports->src = flow_keys->sport;
842 key_ports->dst = flow_keys->dport;
843 }
Stanislav Fomichev71c99e32019-07-25 15:52:30 -0700844
845 if (dissector_uses_key(flow_dissector,
846 FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
847 key_tags = skb_flow_dissector_target(flow_dissector,
848 FLOW_DISSECTOR_KEY_FLOW_LABEL,
849 target_container);
850 key_tags->flow_label = ntohl(flow_keys->flow_label);
851 }
Petar Penkovd58e4682018-09-14 07:46:18 -0700852}
853
Stanislav Fomichev089b19a2019-04-22 08:55:44 -0700854bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
Stanislav Fomichev086f9562019-07-25 15:52:25 -0700855 __be16 proto, int nhoff, int hlen, unsigned int flags)
Stanislav Fomichev089b19a2019-04-22 08:55:44 -0700856{
857 struct bpf_flow_keys *flow_keys = ctx->flow_keys;
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800858 u32 result;
859
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800860 /* Pass parameters to the BPF program */
861 memset(flow_keys, 0, sizeof(*flow_keys));
Stanislav Fomichev089b19a2019-04-22 08:55:44 -0700862 flow_keys->n_proto = proto;
863 flow_keys->nhoff = nhoff;
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800864 flow_keys->thoff = flow_keys->nhoff;
865
Stanislav Fomichev086f9562019-07-25 15:52:25 -0700866 BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
867 (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
868 BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
869 (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
870 BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
871 (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
872 flow_keys->flags = flags;
873
David Miller3d9f773c2020-02-24 15:01:43 +0100874 result = bpf_prog_run_pin_on_cpu(prog, ctx);
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800875
Stanislav Fomichev089b19a2019-04-22 08:55:44 -0700876 flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800877 flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
Stanislav Fomichev089b19a2019-04-22 08:55:44 -0700878 flow_keys->nhoff, hlen);
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800879
880 return result == BPF_OK;
881}
882
WANG Cong453a9402014-08-25 17:03:47 -0700883/**
884 * __skb_flow_dissect - extract the flow_keys struct and return it
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -0700885 * @net: associated network namespace, derived from @skb if NULL
WANG Cong453a9402014-08-25 17:03:47 -0700886 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
Jiri Pirko06635a32015-05-12 14:56:16 +0200887 * @flow_dissector: list of keys to dissect
888 * @target_container: target structure to put dissected values into
WANG Cong453a9402014-08-25 17:03:47 -0700889 * @data: raw buffer pointer to the packet, if NULL use skb->data
890 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
891 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
892 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
Bart Van Assched79b3ba2019-03-25 09:17:21 -0700893 * @flags: flags that control the dissection process, e.g.
Stanislav Fomichev1cc26452019-05-31 14:05:06 -0700894 * FLOW_DISSECTOR_F_STOP_AT_ENCAP.
WANG Cong453a9402014-08-25 17:03:47 -0700895 *
Jiri Pirko06635a32015-05-12 14:56:16 +0200896 * The function will try to retrieve individual keys into the target specified
897 * by @flow_dissector from either the skbuff or a raw buffer specified by the
898 * remaining parameters.
899 *
900 * Caller must take care of zeroing target container memory.
WANG Cong453a9402014-08-25 17:03:47 -0700901 */
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -0700902bool __skb_flow_dissect(const struct net *net,
903 const struct sk_buff *skb,
Jiri Pirko06635a32015-05-12 14:56:16 +0200904 struct flow_dissector *flow_dissector,
905 void *target_container,
Tom Herbertcd79a232015-09-01 09:24:27 -0700906 void *data, __be16 proto, int nhoff, int hlen,
907 unsigned int flags)
Eric Dumazet0744dd02011-11-28 05:22:18 +0000908{
Tom Herbert42aecaa2015-06-04 09:16:39 -0700909 struct flow_dissector_key_control *key_control;
Jiri Pirko06635a32015-05-12 14:56:16 +0200910 struct flow_dissector_key_basic *key_basic;
911 struct flow_dissector_key_addrs *key_addrs;
Tom Herbertd34af822015-06-04 09:16:43 -0700912 struct flow_dissector_key_tags *key_tags;
Hadar Hen Zionf6a66922016-08-17 13:36:11 +0300913 struct flow_dissector_key_vlan *key_vlan;
Tom Herbert3a1214e2017-09-01 14:04:11 -0700914 enum flow_dissect_ret fdret;
Jianbo Liu24c590e2018-07-06 05:38:14 +0000915 enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
Guillaume Nault58cff782020-05-26 14:29:00 +0200916 bool mpls_el = false;
917 int mpls_lse = 0;
Tom Herbert1eed4df2017-09-01 14:04:12 -0700918 int num_hdrs = 0;
Geert Uytterhoeven8e690ff2015-06-25 15:10:32 +0200919 u8 ip_proto = 0;
Eric Dumazet34fad542016-11-09 16:04:46 -0800920 bool ret;
Eric Dumazet0744dd02011-11-28 05:22:18 +0000921
David S. Miller690e36e2014-08-23 12:13:41 -0700922 if (!data) {
923 data = skb->data;
Hadar Hen Ziond5709f72016-08-17 13:36:10 +0300924 proto = skb_vlan_tag_present(skb) ?
925 skb->vlan_proto : skb->protocol;
WANG Cong453a9402014-08-25 17:03:47 -0700926 nhoff = skb_network_offset(skb);
David S. Miller690e36e2014-08-23 12:13:41 -0700927 hlen = skb_headlen(skb);
John Crispin2d5716452017-08-10 10:09:03 +0200928#if IS_ENABLED(CONFIG_NET_DSA)
Alexander Lobakin8bef0af2019-12-05 13:02:35 +0300929 if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
930 proto == htons(ETH_P_XDSA))) {
John Crispin43e66522017-08-09 14:41:19 +0200931 const struct dsa_device_ops *ops;
Alexander Lobakin8bef0af2019-12-05 13:02:35 +0300932 int offset = 0;
John Crispin43e66522017-08-09 14:41:19 +0200933
934 ops = skb->dev->dsa_ptr->tag_ops;
935 if (ops->flow_dissect &&
936 !ops->flow_dissect(skb, &proto, &offset)) {
937 hlen -= offset;
938 nhoff += offset;
939 }
940 }
John Crispin2d5716452017-08-10 10:09:03 +0200941#endif
David S. Miller690e36e2014-08-23 12:13:41 -0700942 }
943
Tom Herbert42aecaa2015-06-04 09:16:39 -0700944	/* It is ensured by skb_flow_dissector_init() that the control key will
945	 * always be present.
946 */
947 key_control = skb_flow_dissector_target(flow_dissector,
948 FLOW_DISSECTOR_KEY_CONTROL,
949 target_container);
950
Jiri Pirko06635a32015-05-12 14:56:16 +0200951	/* It is ensured by skb_flow_dissector_init() that the basic key will
952	 * always be present.
953 */
954 key_basic = skb_flow_dissector_target(flow_dissector,
955 FLOW_DISSECTOR_KEY_BASIC,
956 target_container);
Eric Dumazet0744dd02011-11-28 05:22:18 +0000957
Willem de Bruijnd0e13a12018-09-24 16:49:57 -0400958 if (skb) {
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -0700959 if (!net) {
960 if (skb->dev)
961 net = dev_net(skb->dev);
962 else if (skb->sk)
963 net = sock_net(skb->sk);
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -0700964 }
Stanislav Fomichev9b52e3f2019-04-22 08:55:47 -0700965 }
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800966
Stanislav Fomichev9b52e3f2019-04-22 08:55:47 -0700967 WARN_ON_ONCE(!net);
968 if (net) {
Jakub Sitnickia3fd7ce2020-05-31 10:28:36 +0200969 enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
Jakub Sitnicki695c1212020-06-25 16:13:55 +0200970 struct bpf_prog_array *run_array;
Jakub Sitnickia3fd7ce2020-05-31 10:28:36 +0200971
Stanislav Fomichev9b52e3f2019-04-22 08:55:47 -0700972 rcu_read_lock();
Jakub Sitnicki695c1212020-06-25 16:13:55 +0200973 run_array = rcu_dereference(init_net.bpf.run_array[type]);
974 if (!run_array)
975 run_array = rcu_dereference(net->bpf.run_array[type]);
Stanislav Fomicheva11c3972019-10-07 09:21:02 -0700976
Jakub Sitnicki695c1212020-06-25 16:13:55 +0200977 if (run_array) {
Stanislav Fomichev9b52e3f2019-04-22 08:55:47 -0700978 struct bpf_flow_keys flow_keys;
979 struct bpf_flow_dissector ctx = {
980 .flow_keys = &flow_keys,
981 .data = data,
982 .data_end = data + hlen,
983 };
984 __be16 n_proto = proto;
Jakub Sitnicki695c1212020-06-25 16:13:55 +0200985 struct bpf_prog *prog;
Stanislav Fomichev9b52e3f2019-04-22 08:55:47 -0700986
987 if (skb) {
988 ctx.skb = skb;
989 /* we can't use 'proto' in the skb case
990 * because it might be set to skb->vlan_proto
991 * which has been pulled from the data
992 */
993 n_proto = skb->protocol;
994 }
995
Jakub Sitnicki695c1212020-06-25 16:13:55 +0200996 prog = READ_ONCE(run_array->items[0].prog);
997 ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
Stanislav Fomichev086f9562019-07-25 15:52:25 -0700998 hlen, flags);
Stanislav Fomichevc8aa7032019-01-28 08:53:53 -0800999 __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
1000 target_container);
1001 rcu_read_unlock();
1002 return ret;
1003 }
Petar Penkovd58e4682018-09-14 07:46:18 -07001004 rcu_read_unlock();
Petar Penkovd58e4682018-09-14 07:46:18 -07001005 }
Petar Penkovd58e4682018-09-14 07:46:18 -07001006
David S. Miller20a17bf2015-09-01 21:19:17 -07001007 if (dissector_uses_key(flow_dissector,
1008 FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
Jiri Pirko67a900c2015-05-12 14:56:19 +02001009 struct ethhdr *eth = eth_hdr(skb);
1010 struct flow_dissector_key_eth_addrs *key_eth_addrs;
1011
1012 key_eth_addrs = skb_flow_dissector_target(flow_dissector,
1013 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1014 target_container);
1015 memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
1016 }
1017
Jiri Pirkoc5ef1882017-03-06 16:39:54 +01001018proto_again:
Tom Herbert3a1214e2017-09-01 14:04:11 -07001019 fdret = FLOW_DISSECT_RET_CONTINUE;
1020
Eric Dumazet0744dd02011-11-28 05:22:18 +00001021 switch (proto) {
Joe Perches2b8837a2014-03-12 10:04:17 -07001022 case htons(ETH_P_IP): {
Eric Dumazet0744dd02011-11-28 05:22:18 +00001023 const struct iphdr *iph;
1024 struct iphdr _iph;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001025
David S. Miller690e36e2014-08-23 12:13:41 -07001026 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001027 if (!iph || iph->ihl < 5) {
1028 fdret = FLOW_DISSECT_RET_OUT_BAD;
1029 break;
1030 }
1031
Eric Dumazet3797d3e2013-11-07 08:37:28 -08001032 nhoff += iph->ihl * 4;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001033
Eric Dumazet3797d3e2013-11-07 08:37:28 -08001034 ip_proto = iph->protocol;
Eric Dumazet3797d3e2013-11-07 08:37:28 -08001035
Alexander Duyck918c0232016-02-24 09:29:38 -08001036 if (dissector_uses_key(flow_dissector,
1037 FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
1038 key_addrs = skb_flow_dissector_target(flow_dissector,
1039 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1040 target_container);
Tom Herbertc3f83242015-06-04 09:16:40 -07001041
Alexander Duyck918c0232016-02-24 09:29:38 -08001042 memcpy(&key_addrs->v4addrs, &iph->saddr,
1043 sizeof(key_addrs->v4addrs));
1044 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1045 }
Tom Herbert807e1652015-09-01 09:24:28 -07001046
1047 if (ip_is_fragment(iph)) {
David S. Miller4b369932015-09-01 16:46:08 -07001048 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
Tom Herbert807e1652015-09-01 09:24:28 -07001049
1050 if (iph->frag_off & htons(IP_OFFSET)) {
Tom Herbert3a1214e2017-09-01 14:04:11 -07001051 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1052 break;
Tom Herbert807e1652015-09-01 09:24:28 -07001053 } else {
David S. Miller4b369932015-09-01 16:46:08 -07001054 key_control->flags |= FLOW_DIS_FIRST_FRAG;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001055 if (!(flags &
1056 FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
1057 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1058 break;
1059 }
Tom Herbert807e1652015-09-01 09:24:28 -07001060 }
1061 }
1062
Or Gerlitz518d8a22017-06-01 21:37:37 +03001063 __skb_flow_dissect_ipv4(skb, flow_dissector,
1064 target_container, data, iph);
1065
Eric Dumazet0744dd02011-11-28 05:22:18 +00001066 break;
1067 }
Joe Perches2b8837a2014-03-12 10:04:17 -07001068 case htons(ETH_P_IPV6): {
Eric Dumazet0744dd02011-11-28 05:22:18 +00001069 const struct ipv6hdr *iph;
1070 struct ipv6hdr _iph;
Tom Herbert19469a82014-07-01 21:33:01 -07001071
David S. Miller690e36e2014-08-23 12:13:41 -07001072 iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001073 if (!iph) {
1074 fdret = FLOW_DISSECT_RET_OUT_BAD;
1075 break;
1076 }
Eric Dumazet0744dd02011-11-28 05:22:18 +00001077
1078 ip_proto = iph->nexthdr;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001079 nhoff += sizeof(struct ipv6hdr);
Tom Herbert19469a82014-07-01 21:33:01 -07001080
David S. Miller20a17bf2015-09-01 21:19:17 -07001081 if (dissector_uses_key(flow_dissector,
1082 FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
Alexander Duyckb3c31062016-02-24 09:29:57 -08001083 key_addrs = skb_flow_dissector_target(flow_dissector,
1084 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1085 target_container);
Alexander Duyck5af7fb62014-10-10 12:09:12 -07001086
Alexander Duyckb3c31062016-02-24 09:29:57 -08001087 memcpy(&key_addrs->v6addrs, &iph->saddr,
1088 sizeof(key_addrs->v6addrs));
Tom Herbertc3f83242015-06-04 09:16:40 -07001089 key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
Jiri Pirkob9249332015-05-12 14:56:18 +02001090 }
Tom Herbert87ee9e52015-06-04 09:16:44 -07001091
Alexander Duyck461547f2016-02-09 02:49:54 -08001092 if ((dissector_uses_key(flow_dissector,
1093 FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
1094 (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
1095 ip6_flowlabel(iph)) {
1096 __be32 flow_label = ip6_flowlabel(iph);
1097
David S. Miller20a17bf2015-09-01 21:19:17 -07001098 if (dissector_uses_key(flow_dissector,
1099 FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
Tom Herbert87ee9e52015-06-04 09:16:44 -07001100 key_tags = skb_flow_dissector_target(flow_dissector,
1101 FLOW_DISSECTOR_KEY_FLOW_LABEL,
1102 target_container);
1103 key_tags->flow_label = ntohl(flow_label);
Jiri Pirko12c227e2015-05-22 11:05:58 +02001104 }
Tom Herbert3a1214e2017-09-01 14:04:11 -07001105 if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
1106 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1107 break;
1108 }
Tom Herbert19469a82014-07-01 21:33:01 -07001109 }
1110
Or Gerlitz518d8a22017-06-01 21:37:37 +03001111 __skb_flow_dissect_ipv6(skb, flow_dissector,
1112 target_container, data, iph);
1113
Eric Dumazet0744dd02011-11-28 05:22:18 +00001114 break;
1115 }
Joe Perches2b8837a2014-03-12 10:04:17 -07001116 case htons(ETH_P_8021AD):
1117 case htons(ETH_P_8021Q): {
Jianbo Liu24c590e2018-07-06 05:38:14 +00001118 const struct vlan_hdr *vlan = NULL;
Arnd Bergmannbc72f3d2016-10-24 23:40:30 +02001119 struct vlan_hdr _vlan;
Jianbo Liu2064c3d2018-07-06 05:38:12 +00001120 __be16 saved_vlan_tpid = proto;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001121
Jianbo Liu24c590e2018-07-06 05:38:14 +00001122 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
1123 skb && skb_vlan_tag_present(skb)) {
Hadar Hen Ziond5709f72016-08-17 13:36:10 +03001124 proto = skb->protocol;
Jianbo Liu24c590e2018-07-06 05:38:14 +00001125 } else {
Hadar Hen Ziond5709f72016-08-17 13:36:10 +03001126 vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
1127 data, hlen, &_vlan);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001128 if (!vlan) {
1129 fdret = FLOW_DISSECT_RET_OUT_BAD;
1130 break;
1131 }
1132
Hadar Hen Ziond5709f72016-08-17 13:36:10 +03001133 proto = vlan->h_vlan_encapsulated_proto;
1134 nhoff += sizeof(*vlan);
Hadar Hen Ziond5709f72016-08-17 13:36:10 +03001135 }
1136
Jianbo Liu24c590e2018-07-06 05:38:14 +00001137 if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
1138 dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
1139 } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
1140 dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
1141 } else {
1142 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1143 break;
1144 }
1145
1146 if (dissector_uses_key(flow_dissector, dissector_vlan)) {
Hadar Hen Zionf6a66922016-08-17 13:36:11 +03001147 key_vlan = skb_flow_dissector_target(flow_dissector,
Jianbo Liu24c590e2018-07-06 05:38:14 +00001148 dissector_vlan,
Tom Herbertd34af822015-06-04 09:16:43 -07001149 target_container);
1150
Jianbo Liu24c590e2018-07-06 05:38:14 +00001151 if (!vlan) {
Hadar Hen Zionf6a66922016-08-17 13:36:11 +03001152 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
Michał Mirosław9b319142018-11-07 18:07:03 +01001153 key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
Hadar Hen Zionf6a66922016-08-17 13:36:11 +03001154 } else {
1155 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
Hadar Hen Ziond5709f72016-08-17 13:36:10 +03001156 VLAN_VID_MASK;
Hadar Hen Zionf6a66922016-08-17 13:36:11 +03001157 key_vlan->vlan_priority =
1158 (ntohs(vlan->h_vlan_TCI) &
1159 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1160 }
Jianbo Liu2064c3d2018-07-06 05:38:12 +00001161 key_vlan->vlan_tpid = saved_vlan_tpid;
Tom Herbertd34af822015-06-04 09:16:43 -07001162 }
1163
Tom Herbert3a1214e2017-09-01 14:04:11 -07001164 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1165 break;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001166 }
Joe Perches2b8837a2014-03-12 10:04:17 -07001167 case htons(ETH_P_PPP_SES): {
Eric Dumazet0744dd02011-11-28 05:22:18 +00001168 struct {
1169 struct pppoe_hdr hdr;
1170 __be16 proto;
1171 } *hdr, _hdr;
David S. Miller690e36e2014-08-23 12:13:41 -07001172 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001173 if (!hdr) {
1174 fdret = FLOW_DISSECT_RET_OUT_BAD;
1175 break;
1176 }
1177
Eric Dumazet0744dd02011-11-28 05:22:18 +00001178 proto = hdr->proto;
1179 nhoff += PPPOE_SES_HLEN;
1180 switch (proto) {
Joe Perches2b8837a2014-03-12 10:04:17 -07001181 case htons(PPP_IP):
Tom Herbert3a1214e2017-09-01 14:04:11 -07001182 proto = htons(ETH_P_IP);
1183 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1184 break;
Joe Perches2b8837a2014-03-12 10:04:17 -07001185 case htons(PPP_IPV6):
Tom Herbert3a1214e2017-09-01 14:04:11 -07001186 proto = htons(ETH_P_IPV6);
1187 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1188 break;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001189 default:
Tom Herbert3a1214e2017-09-01 14:04:11 -07001190 fdret = FLOW_DISSECT_RET_OUT_BAD;
1191 break;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001192 }
Tom Herbert3a1214e2017-09-01 14:04:11 -07001193 break;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001194 }
Erik Hugne08bfc9c2015-01-22 17:10:32 +01001195 case htons(ETH_P_TIPC): {
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001196 struct tipc_basic_hdr *hdr, _hdr;
1197
1198 hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
1199 data, hlen, &_hdr);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001200 if (!hdr) {
1201 fdret = FLOW_DISSECT_RET_OUT_BAD;
1202 break;
1203 }
Jiri Pirko06635a32015-05-12 14:56:16 +02001204
David S. Miller20a17bf2015-09-01 21:19:17 -07001205 if (dissector_uses_key(flow_dissector,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001206 FLOW_DISSECTOR_KEY_TIPC)) {
Jiri Pirko06635a32015-05-12 14:56:16 +02001207 key_addrs = skb_flow_dissector_target(flow_dissector,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001208 FLOW_DISSECTOR_KEY_TIPC,
Jiri Pirko06635a32015-05-12 14:56:16 +02001209 target_container);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001210 key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
1211 key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
Jiri Pirko06635a32015-05-12 14:56:16 +02001212 }
Tom Herbert3a1214e2017-09-01 14:04:11 -07001213 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1214 break;
Erik Hugne08bfc9c2015-01-22 17:10:32 +01001215 }
Tom Herbertb3baa0f2015-06-04 09:16:46 -07001216
1217 case htons(ETH_P_MPLS_UC):
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +01001218 case htons(ETH_P_MPLS_MC):
Tom Herbert3a1214e2017-09-01 14:04:11 -07001219 fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
Jiri Pirko4a5d6c8b2017-03-06 16:39:52 +01001220 target_container, data,
Guillaume Nault58cff782020-05-26 14:29:00 +02001221 nhoff, hlen, mpls_lse,
1222 &mpls_el);
1223 nhoff += sizeof(struct mpls_label);
1224 mpls_lse++;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001225 break;
Alexander Duyck56193d12014-09-05 19:20:26 -04001226 case htons(ETH_P_FCOE):
Tom Herbert3a1214e2017-09-01 14:04:11 -07001227 if ((hlen - nhoff) < FCOE_HEADER_LEN) {
1228 fdret = FLOW_DISSECT_RET_OUT_BAD;
1229 break;
1230 }
Alexander Duyck224516b2016-02-24 09:29:51 -08001231
1232 nhoff += FCOE_HEADER_LEN;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001233 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1234 break;
Simon Horman55733352017-01-11 14:05:42 +01001235
1236 case htons(ETH_P_ARP):
Jiri Pirko9bf881f2017-03-06 16:39:51 +01001237 case htons(ETH_P_RARP):
Tom Herbert3a1214e2017-09-01 14:04:11 -07001238 fdret = __skb_flow_dissect_arp(skb, flow_dissector,
Jiri Pirko9bf881f2017-03-06 16:39:51 +01001239 target_container, data,
Tom Herbert3a1214e2017-09-01 14:04:11 -07001240 nhoff, hlen);
1241 break;
1242
Sven Eckelmann5b0890a2017-12-21 10:17:42 +01001243 case htons(ETH_P_BATMAN):
1244 fdret = __skb_flow_dissect_batadv(skb, key_control, data,
1245 &proto, &nhoff, hlen, flags);
1246 break;
1247
Tom Herbert3a1214e2017-09-01 14:04:11 -07001248 default:
1249 fdret = FLOW_DISSECT_RET_OUT_BAD;
1250 break;
1251 }
1252
1253 /* Process result of proto processing */
1254 switch (fdret) {
1255 case FLOW_DISSECT_RET_OUT_GOOD:
1256 goto out_good;
1257 case FLOW_DISSECT_RET_PROTO_AGAIN:
Tom Herbert1eed4df2017-09-01 14:04:12 -07001258 if (skb_flow_dissect_allowed(&num_hdrs))
1259 goto proto_again;
1260 goto out_good;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001261 case FLOW_DISSECT_RET_CONTINUE:
1262 case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1263 break;
1264 case FLOW_DISSECT_RET_OUT_BAD:
Eric Dumazet0744dd02011-11-28 05:22:18 +00001265 default:
Tom Herberta6e544b2015-09-01 09:24:26 -07001266 goto out_bad;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001267 }
1268
Tom Herbert6a74fcf2015-06-12 09:01:06 -07001269ip_proto_again:
Tom Herbert3a1214e2017-09-01 14:04:11 -07001270 fdret = FLOW_DISSECT_RET_CONTINUE;
1271
Eric Dumazet0744dd02011-11-28 05:22:18 +00001272 switch (ip_proto) {
Jiri Pirko7c92de82017-03-06 16:39:55 +01001273 case IPPROTO_GRE:
Tom Herbert3a1214e2017-09-01 14:04:11 -07001274 fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
Jiri Pirko7c92de82017-03-06 16:39:55 +01001275 target_container, data,
Tom Herbert3a1214e2017-09-01 14:04:11 -07001276 &proto, &nhoff, &hlen, flags);
1277 break;
1278
Tom Herbert6a74fcf2015-06-12 09:01:06 -07001279 case NEXTHDR_HOP:
1280 case NEXTHDR_ROUTING:
1281 case NEXTHDR_DEST: {
1282 u8 _opthdr[2], *opthdr;
1283
1284 if (proto != htons(ETH_P_IPV6))
1285 break;
1286
1287 opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
1288 data, hlen, &_opthdr);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001289 if (!opthdr) {
1290 fdret = FLOW_DISSECT_RET_OUT_BAD;
1291 break;
1292 }
Tom Herbert6a74fcf2015-06-12 09:01:06 -07001293
Eric Dumazet1e98a0f2015-06-12 19:31:32 -07001294 ip_proto = opthdr[0];
1295 nhoff += (opthdr[1] + 1) << 3;
Tom Herbert6a74fcf2015-06-12 09:01:06 -07001296
Tom Herbert3a1214e2017-09-01 14:04:11 -07001297 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1298 break;
Tom Herbert6a74fcf2015-06-12 09:01:06 -07001299 }
Tom Herbertb840f282015-09-01 09:24:29 -07001300 case NEXTHDR_FRAGMENT: {
1301 struct frag_hdr _fh, *fh;
1302
1303 if (proto != htons(ETH_P_IPV6))
1304 break;
1305
1306 fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
1307 data, hlen, &_fh);
1308
Tom Herbert3a1214e2017-09-01 14:04:11 -07001309 if (!fh) {
1310 fdret = FLOW_DISSECT_RET_OUT_BAD;
1311 break;
1312 }
Tom Herbertb840f282015-09-01 09:24:29 -07001313
David S. Miller4b369932015-09-01 16:46:08 -07001314 key_control->flags |= FLOW_DIS_IS_FRAGMENT;
Tom Herbertb840f282015-09-01 09:24:29 -07001315
1316 nhoff += sizeof(_fh);
Alexander Duyck43d2ccb2016-02-24 09:29:44 -08001317 ip_proto = fh->nexthdr;
Tom Herbertb840f282015-09-01 09:24:29 -07001318
1319 if (!(fh->frag_off & htons(IP6_OFFSET))) {
David S. Miller4b369932015-09-01 16:46:08 -07001320 key_control->flags |= FLOW_DIS_FIRST_FRAG;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001321 if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
1322 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1323 break;
1324 }
Tom Herbertb840f282015-09-01 09:24:29 -07001325 }
Tom Herbert3a1214e2017-09-01 14:04:11 -07001326
1327 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1328 break;
Tom Herbertb840f282015-09-01 09:24:29 -07001329 }
Eric Dumazet0744dd02011-11-28 05:22:18 +00001330 case IPPROTO_IPIP:
Tom Herbertfca41892013-07-29 11:07:36 -07001331 proto = htons(ETH_P_IP);
Tom Herbert823b9692015-09-01 09:24:32 -07001332
David S. Miller4b369932015-09-01 16:46:08 -07001333 key_control->flags |= FLOW_DIS_ENCAPSULATION;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001334 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1335 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1336 break;
1337 }
Tom Herbert823b9692015-09-01 09:24:32 -07001338
Tom Herbert3a1214e2017-09-01 14:04:11 -07001339 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1340 break;
1341
Tom Herbertb438f942013-07-29 11:07:42 -07001342 case IPPROTO_IPV6:
1343 proto = htons(ETH_P_IPV6);
Tom Herbert823b9692015-09-01 09:24:32 -07001344
David S. Miller4b369932015-09-01 16:46:08 -07001345 key_control->flags |= FLOW_DIS_ENCAPSULATION;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001346 if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1347 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1348 break;
1349 }
Tom Herbert823b9692015-09-01 09:24:32 -07001350
Tom Herbert3a1214e2017-09-01 14:04:11 -07001351 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1352 break;
1353
1354
Tom Herbertb3baa0f2015-06-04 09:16:46 -07001355 case IPPROTO_MPLS:
1356 proto = htons(ETH_P_MPLS_UC);
Tom Herbert3a1214e2017-09-01 14:04:11 -07001357 fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1358 break;
1359
Jiri Pirkoac4bb5d2017-05-23 18:40:44 +02001360 case IPPROTO_TCP:
1361 __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
1362 data, nhoff, hlen);
1363 break;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001364
Matteo Croce3b336d6f2019-10-29 14:50:51 +01001365 case IPPROTO_ICMP:
1366 case IPPROTO_ICMPV6:
1367 __skb_flow_dissect_icmp(skb, flow_dissector, target_container,
1368 data, nhoff, hlen);
1369 break;
1370
Eric Dumazet0744dd02011-11-28 05:22:18 +00001371 default:
1372 break;
1373 }
1374
Yoshiki Komachi8ffb0552019-12-03 19:40:12 +09001375 if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
1376 __skb_flow_dissect_ports(skb, flow_dissector, target_container,
1377 data, nhoff, ip_proto, hlen);
Alexander Duyck5af7fb62014-10-10 12:09:12 -07001378
Tom Herbert3a1214e2017-09-01 14:04:11 -07001379 /* Process result of IP proto processing */
1380 switch (fdret) {
1381 case FLOW_DISSECT_RET_PROTO_AGAIN:
Tom Herbert1eed4df2017-09-01 14:04:12 -07001382 if (skb_flow_dissect_allowed(&num_hdrs))
1383 goto proto_again;
1384 break;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001385 case FLOW_DISSECT_RET_IPPROTO_AGAIN:
Tom Herbert1eed4df2017-09-01 14:04:12 -07001386 if (skb_flow_dissect_allowed(&num_hdrs))
1387 goto ip_proto_again;
1388 break;
Tom Herbert3a1214e2017-09-01 14:04:11 -07001389 case FLOW_DISSECT_RET_OUT_GOOD:
1390 case FLOW_DISSECT_RET_CONTINUE:
1391 break;
1392 case FLOW_DISSECT_RET_OUT_BAD:
1393 default:
1394 goto out_bad;
1395 }
1396
Tom Herberta6e544b2015-09-01 09:24:26 -07001397out_good:
1398 ret = true;
1399
Eric Dumazet34fad542016-11-09 16:04:46 -08001400out:
Eric Dumazetd0c081b2018-01-17 14:21:13 -08001401 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
Tom Herberta6e544b2015-09-01 09:24:26 -07001402 key_basic->n_proto = proto;
1403 key_basic->ip_proto = ip_proto;
Tom Herberta6e544b2015-09-01 09:24:26 -07001404
1405 return ret;
Eric Dumazet34fad542016-11-09 16:04:46 -08001406
1407out_bad:
1408 ret = false;
Eric Dumazet34fad542016-11-09 16:04:46 -08001409 goto out;
Eric Dumazet0744dd02011-11-28 05:22:18 +00001410}
David S. Miller690e36e2014-08-23 12:13:41 -07001411EXPORT_SYMBOL(__skb_flow_dissect);
Cong Wang441d9d32013-01-21 00:39:24 +00001412
Eric Dumazet55667442019-10-22 07:57:46 -07001413static siphash_key_t hashrnd __read_mostly;
Hannes Frederic Sowa66415cf2013-10-23 20:06:00 +02001414static __always_inline void __flow_hash_secret_init(void)
1415{
1416 net_get_random_once(&hashrnd, sizeof(hashrnd));
1417}
1418
Eric Dumazet55667442019-10-22 07:57:46 -07001419static const void *flow_keys_hash_start(const struct flow_keys *flow)
Hannes Frederic Sowa66415cf2013-10-23 20:06:00 +02001420{
Eric Dumazet55667442019-10-22 07:57:46 -07001421 BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
1422 return &flow->FLOW_KEYS_HASH_START_FIELD;
Tom Herbert42aecaa2015-06-04 09:16:39 -07001423}
1424
David S. Miller20a17bf2015-09-01 21:19:17 -07001425static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
Tom Herbert42aecaa2015-06-04 09:16:39 -07001426{
Tom Herbertc3f83242015-06-04 09:16:40 -07001427 size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
David S. Millerd31e9552019-11-02 13:12:51 -07001428
Tom Herbert42aecaa2015-06-04 09:16:39 -07001429 BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
Tom Herbertc3f83242015-06-04 09:16:40 -07001430
1431 switch (flow->control.addr_type) {
1432 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1433 diff -= sizeof(flow->addrs.v4addrs);
1434 break;
1435 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1436 diff -= sizeof(flow->addrs.v6addrs);
1437 break;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001438 case FLOW_DISSECTOR_KEY_TIPC:
1439 diff -= sizeof(flow->addrs.tipckey);
Tom Herbert9f249082015-06-04 09:16:41 -07001440 break;
Tom Herbertc3f83242015-06-04 09:16:40 -07001441 }
Eric Dumazet55667442019-10-22 07:57:46 -07001442 return sizeof(*flow) - diff;
Tom Herbertc3f83242015-06-04 09:16:40 -07001443}
1444
1445__be32 flow_get_u32_src(const struct flow_keys *flow)
1446{
1447 switch (flow->control.addr_type) {
1448 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1449 return flow->addrs.v4addrs.src;
1450 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1451 return (__force __be32)ipv6_addr_hash(
1452 &flow->addrs.v6addrs.src);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001453 case FLOW_DISSECTOR_KEY_TIPC:
1454 return flow->addrs.tipckey.key;
Tom Herbertc3f83242015-06-04 09:16:40 -07001455 default:
1456 return 0;
1457 }
1458}
1459EXPORT_SYMBOL(flow_get_u32_src);
1460
1461__be32 flow_get_u32_dst(const struct flow_keys *flow)
1462{
1463 switch (flow->control.addr_type) {
1464 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1465 return flow->addrs.v4addrs.dst;
1466 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1467 return (__force __be32)ipv6_addr_hash(
1468 &flow->addrs.v6addrs.dst);
1469 default:
1470 return 0;
1471 }
1472}
1473EXPORT_SYMBOL(flow_get_u32_dst);
1474
Matteo Croce98298e62019-10-29 14:50:50 +01001475/* Sort the source and destination IP addresses (and the ports if the IPs
1476 * are the same), so that the hash is consistent in both flow directions;
1477 * see the illustrative sketch after flow_hash_from_keys() below. */
Tom Herbertc3f83242015-06-04 09:16:40 -07001478static inline void __flow_hash_consistentify(struct flow_keys *keys)
1479{
1480 int addr_diff, i;
1481
1482 switch (keys->control.addr_type) {
1483 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1484 addr_diff = (__force u32)keys->addrs.v4addrs.dst -
1485 (__force u32)keys->addrs.v4addrs.src;
1486 if ((addr_diff < 0) ||
1487 (addr_diff == 0 &&
1488 ((__force u16)keys->ports.dst <
1489 (__force u16)keys->ports.src))) {
1490 swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
1491 swap(keys->ports.src, keys->ports.dst);
1492 }
1493 break;
1494 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1495 addr_diff = memcmp(&keys->addrs.v6addrs.dst,
1496 &keys->addrs.v6addrs.src,
1497 sizeof(keys->addrs.v6addrs.dst));
1498 if ((addr_diff < 0) ||
1499 (addr_diff == 0 &&
1500 ((__force u16)keys->ports.dst <
1501 (__force u16)keys->ports.src))) {
1502 for (i = 0; i < 4; i++)
1503 swap(keys->addrs.v6addrs.src.s6_addr32[i],
1504 keys->addrs.v6addrs.dst.s6_addr32[i]);
1505 swap(keys->ports.src, keys->ports.dst);
1506 }
1507 break;
1508 }
Hannes Frederic Sowa66415cf2013-10-23 20:06:00 +02001509}
1510
Eric Dumazet55667442019-10-22 07:57:46 -07001511static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
1512 const siphash_key_t *keyval)
Tom Herbert5ed20a62014-07-01 21:32:05 -07001513{
1514 u32 hash;
1515
Tom Herbertc3f83242015-06-04 09:16:40 -07001516 __flow_hash_consistentify(keys);
Tom Herbert5ed20a62014-07-01 21:32:05 -07001517
Eric Dumazet55667442019-10-22 07:57:46 -07001518 hash = siphash(flow_keys_hash_start(keys),
1519 flow_keys_hash_length(keys), keyval);
Tom Herbert5ed20a62014-07-01 21:32:05 -07001520 if (!hash)
1521 hash = 1;
1522
1523 return hash;
1524}
1525
1526u32 flow_hash_from_keys(struct flow_keys *keys)
1527{
Tom Herbert50fb7992015-05-01 11:30:12 -07001528 __flow_hash_secret_init();
Eric Dumazet55667442019-10-22 07:57:46 -07001529 return __flow_hash_from_keys(keys, &hashrnd);
Tom Herbert5ed20a62014-07-01 21:32:05 -07001530}
1531EXPORT_SYMBOL(flow_hash_from_keys);
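
/* Illustrative sketch, not part of the original file: because
 * __flow_hash_consistentify() sorts the address/port pairs before hashing,
 * a flow and its reverse are expected to map to the same hash value. The
 * helper below only demonstrates that property; its name and usage are
 * hypothetical.
 */
static bool __maybe_unused
example_flow_hash_is_direction_insensitive(const struct flow_keys *fwd,
					    const struct flow_keys *rev)
{
	struct flow_keys a = *fwd;	/* copies: hashing may swap src/dst */
	struct flow_keys b = *rev;

	return flow_hash_from_keys(&a) == flow_hash_from_keys(&b);
}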
1532
Tom Herbert50fb7992015-05-01 11:30:12 -07001533static inline u32 ___skb_get_hash(const struct sk_buff *skb,
Eric Dumazet55667442019-10-22 07:57:46 -07001534 struct flow_keys *keys,
1535 const siphash_key_t *keyval)
Tom Herbert50fb7992015-05-01 11:30:12 -07001536{
Tom Herbert6db61d72015-09-01 09:24:33 -07001537 skb_flow_dissect_flow_keys(skb, keys,
1538 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
Tom Herbert50fb7992015-05-01 11:30:12 -07001539
1540 return __flow_hash_from_keys(keys, keyval);
1541}
1542
Tom Herbert2f59e1e2015-05-01 11:30:17 -07001543struct _flow_keys_digest_data {
1544 __be16 n_proto;
1545 u8 ip_proto;
1546 u8 padding;
1547 __be32 ports;
1548 __be32 src;
1549 __be32 dst;
1550};
1551
1552void make_flow_keys_digest(struct flow_keys_digest *digest,
1553 const struct flow_keys *flow)
1554{
1555 struct _flow_keys_digest_data *data =
1556 (struct _flow_keys_digest_data *)digest;
1557
1558 BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1559
1560 memset(digest, 0, sizeof(*digest));
1561
Jiri Pirko06635a32015-05-12 14:56:16 +02001562 data->n_proto = flow->basic.n_proto;
1563 data->ip_proto = flow->basic.ip_proto;
1564 data->ports = flow->ports.ports;
Tom Herbertc3f83242015-06-04 09:16:40 -07001565 data->src = flow->addrs.v4addrs.src;
1566 data->dst = flow->addrs.v4addrs.dst;
Tom Herbert2f59e1e2015-05-01 11:30:17 -07001567}
1568EXPORT_SYMBOL(make_flow_keys_digest);
1569
David S. Millereb70db82016-07-01 16:07:50 -04001570static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1571
Florian Westphalb9177832016-10-26 18:49:46 +02001572u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
David S. Millereb70db82016-07-01 16:07:50 -04001573{
1574 struct flow_keys keys;
1575
1576 __flow_hash_secret_init();
1577
1578 memset(&keys, 0, sizeof(keys));
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001579 __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
1580 &keys, NULL, 0, 0, 0,
David S. Millereb70db82016-07-01 16:07:50 -04001581 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1582
Eric Dumazet55667442019-10-22 07:57:46 -07001583 return __flow_hash_from_keys(&keys, &hashrnd);
David S. Millereb70db82016-07-01 16:07:50 -04001584}
1585EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1586
Jiri Pirkod4fd3272015-05-12 14:56:10 +02001587/**
1588 * __skb_get_hash - calculate a flow hash
1589 * @skb: sk_buff to calculate flow hash from
1590 *
1591 * This function calculates a flow hash based on src/dst addresses
Tom Herbert61b905d2014-03-24 15:34:47 -07001592 * and src/dst port numbers. It sets the hash in the skb to a non-zero
1593 * value on success; zero indicates no valid hash. It also sets l4_hash
Cong Wang441d9d32013-01-21 00:39:24 +00001594 * in the skb if the hash is a canonical 4-tuple hash over transport ports.
1595 */
Tom Herbert3958afa1b2013-12-15 22:12:06 -08001596void __skb_get_hash(struct sk_buff *skb)
Cong Wang441d9d32013-01-21 00:39:24 +00001597{
1598 struct flow_keys keys;
Gao Feng635c2232016-08-31 14:15:05 +08001599 u32 hash;
Cong Wang441d9d32013-01-21 00:39:24 +00001600
Tom Herbert50fb7992015-05-01 11:30:12 -07001601 __flow_hash_secret_init();
1602
Eric Dumazet55667442019-10-22 07:57:46 -07001603 hash = ___skb_get_hash(skb, &keys, &hashrnd);
Gao Feng635c2232016-08-31 14:15:05 +08001604
1605 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
Cong Wang441d9d32013-01-21 00:39:24 +00001606}
Tom Herbert3958afa1b2013-12-15 22:12:06 -08001607EXPORT_SYMBOL(__skb_get_hash);
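
/* Illustrative sketch, not part of the original file: callers normally go
 * through the skb_get_hash() wrapper in <linux/skbuff.h>, which only falls
 * back to __skb_get_hash() when no valid hash is cached in the skb. The
 * receive-side helper below and its queue-selection policy are hypothetical.
 */
static u16 __maybe_unused example_pick_rx_queue(struct sk_buff *skb,
						u16 num_queues)
{
	u32 hash = skb_get_hash(skb);	/* dissects the packet if needed */

	return (u16)reciprocal_scale(hash, num_queues);
}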
Cong Wang441d9d32013-01-21 00:39:24 +00001608
Eric Dumazet55667442019-10-22 07:57:46 -07001609__u32 skb_get_hash_perturb(const struct sk_buff *skb,
1610 const siphash_key_t *perturb)
Tom Herbert50fb7992015-05-01 11:30:12 -07001611{
1612 struct flow_keys keys;
1613
1614 return ___skb_get_hash(skb, &keys, perturb);
1615}
1616EXPORT_SYMBOL(skb_get_hash_perturb);
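
/* Illustrative sketch, not part of the original file: a consumer such as a
 * queueing discipline can keep a private siphash_key_t so that its bucket
 * choice cannot be derived from the global skb hash. The structure and
 * helper below are hypothetical.
 */
struct example_bucket_picker {
	siphash_key_t	perturbation;	/* seeded once, e.g. via get_random_bytes() */
	u32		nr_buckets;
};

static u32 __maybe_unused
example_pick_bucket(const struct example_bucket_picker *p,
		    const struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash_perturb(skb, &p->perturbation),
				p->nr_buckets);
}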
1617
Alexander Duyck56193d12014-09-05 19:20:26 -04001618u32 __skb_get_poff(const struct sk_buff *skb, void *data,
Paolo Abeni72a338b2018-05-04 11:32:59 +02001619 const struct flow_keys_basic *keys, int hlen)
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001620{
Tom Herbert42aecaa2015-06-04 09:16:39 -07001621 u32 poff = keys->control.thoff;
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001622
Alexander Duyck43d2ccb2016-02-24 09:29:44 -08001623 /* skip L4 headers for fragments after the first */
1624 if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1625 !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1626 return poff;
1627
Jiri Pirko06635a32015-05-12 14:56:16 +02001628 switch (keys->basic.ip_proto) {
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001629 case IPPROTO_TCP: {
Alexander Duyck5af7fb62014-10-10 12:09:12 -07001630 /* access doff as u8 to avoid unaligned access */
1631 const u8 *doff;
1632 u8 _doff;
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001633
Alexander Duyck5af7fb62014-10-10 12:09:12 -07001634 doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1635 data, hlen, &_doff);
1636 if (!doff)
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001637 return poff;
1638
Alexander Duyck5af7fb62014-10-10 12:09:12 -07001639 poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
Daniel Borkmannf77668d2013-03-19 06:39:30 +00001640 break;
1641 }
1642 case IPPROTO_UDP:
1643 case IPPROTO_UDPLITE:
1644 poff += sizeof(struct udphdr);
1645 break;
1646	/* For the remaining protocols, we do not care about header
1647	 * extensions at this point.
1648 */
1649 case IPPROTO_ICMP:
1650 poff += sizeof(struct icmphdr);
1651 break;
1652 case IPPROTO_ICMPV6:
1653 poff += sizeof(struct icmp6hdr);
1654 break;
1655 case IPPROTO_IGMP:
1656 poff += sizeof(struct igmphdr);
1657 break;
1658 case IPPROTO_DCCP:
1659 poff += sizeof(struct dccp_hdr);
1660 break;
1661 case IPPROTO_SCTP:
1662 poff += sizeof(struct sctphdr);
1663 break;
1664 }
1665
1666 return poff;
1667}
1668
Jiri Pirko0db89b82015-05-12 14:56:14 +02001669/**
1670 * skb_get_poff - get the offset to the payload
1671 * @skb: sk_buff to get the payload offset from
1672 *
1673 * The function returns the offset to the payload, as far as the packet
1674 * could be dissected. The main user is currently BPF, which can thereby
Alexander Duyck56193d12014-09-05 19:20:26 -04001675 * dynamically truncate packets and analyze only their headers, without
1676 * having to push the actual payload to user space.
1677 */
1678u32 skb_get_poff(const struct sk_buff *skb)
1679{
Paolo Abeni72a338b2018-05-04 11:32:59 +02001680 struct flow_keys_basic keys;
Alexander Duyck56193d12014-09-05 19:20:26 -04001681
Stanislav Fomichev3cbf4ff2019-04-22 08:55:46 -07001682 if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
1683 NULL, 0, 0, 0, 0))
Alexander Duyck56193d12014-09-05 19:20:26 -04001684 return 0;
1685
1686 return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1687}
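
/* Illustrative sketch, not part of the original file: a consumer that only
 * wants to inspect or copy protocol headers can use skb_get_poff() as the
 * cut-off point. The helper name and the fallback policy below are
 * hypothetical.
 */
static unsigned int __maybe_unused
example_header_snap_len(const struct sk_buff *skb, unsigned int snaplen)
{
	u32 poff = skb_get_poff(skb);	/* 0 if the packet could not be dissected */

	return poff ? min_t(unsigned int, poff, snaplen) : snaplen;
}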
Jiri Pirko06635a32015-05-12 14:56:16 +02001688
David S. Miller20a17bf2015-09-01 21:19:17 -07001689__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
David S. Millera17ace92015-09-01 17:00:24 -07001690{
1691 memset(keys, 0, sizeof(*keys));
1692
1693 memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1694 sizeof(keys->addrs.v6addrs.src));
1695 memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1696 sizeof(keys->addrs.v6addrs.dst));
1697 keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1698 keys->ports.src = fl6->fl6_sport;
1699 keys->ports.dst = fl6->fl6_dport;
1700 keys->keyid.keyid = fl6->fl6_gre_key;
Michal Kubecekfa1be7e2018-06-04 11:36:05 +02001701 keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
David S. Millera17ace92015-09-01 17:00:24 -07001702 keys->basic.ip_proto = fl6->flowi6_proto;
1703
1704 return flow_hash_from_keys(keys);
1705}
1706EXPORT_SYMBOL(__get_hash_from_flowi6);
1707
Jiri Pirko06635a32015-05-12 14:56:16 +02001708static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1709 {
Tom Herbert42aecaa2015-06-04 09:16:39 -07001710 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1711 .offset = offsetof(struct flow_keys, control),
1712 },
1713 {
Jiri Pirko06635a32015-05-12 14:56:16 +02001714 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1715 .offset = offsetof(struct flow_keys, basic),
1716 },
1717 {
1718 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
Tom Herbertc3f83242015-06-04 09:16:40 -07001719 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1720 },
1721 {
1722 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1723 .offset = offsetof(struct flow_keys, addrs.v6addrs),
Jiri Pirko06635a32015-05-12 14:56:16 +02001724 },
1725 {
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001726 .key_id = FLOW_DISSECTOR_KEY_TIPC,
1727 .offset = offsetof(struct flow_keys, addrs.tipckey),
Tom Herbert9f249082015-06-04 09:16:41 -07001728 },
1729 {
Jiri Pirko06635a32015-05-12 14:56:16 +02001730 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1731 .offset = offsetof(struct flow_keys, ports),
1732 },
Tom Herbertd34af822015-06-04 09:16:43 -07001733 {
Hadar Hen Zionf6a66922016-08-17 13:36:11 +03001734 .key_id = FLOW_DISSECTOR_KEY_VLAN,
1735 .offset = offsetof(struct flow_keys, vlan),
Tom Herbertd34af822015-06-04 09:16:43 -07001736 },
Tom Herbert87ee9e52015-06-04 09:16:44 -07001737 {
1738 .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1739 .offset = offsetof(struct flow_keys, tags),
1740 },
Tom Herbert1fdd5122015-06-04 09:16:45 -07001741 {
1742 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1743 .offset = offsetof(struct flow_keys, keyid),
1744 },
Jiri Pirko06635a32015-05-12 14:56:16 +02001745};
1746
David S. Millereb70db82016-07-01 16:07:50 -04001747static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1748 {
1749 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1750 .offset = offsetof(struct flow_keys, control),
1751 },
1752 {
1753 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1754 .offset = offsetof(struct flow_keys, basic),
1755 },
1756 {
1757 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1758 .offset = offsetof(struct flow_keys, addrs.v4addrs),
1759 },
1760 {
1761 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1762 .offset = offsetof(struct flow_keys, addrs.v6addrs),
1763 },
1764 {
1765 .key_id = FLOW_DISSECTOR_KEY_PORTS,
1766 .offset = offsetof(struct flow_keys, ports),
1767 },
1768};
1769
Paolo Abeni72a338b2018-05-04 11:32:59 +02001770static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
Jiri Pirko06635a32015-05-12 14:56:16 +02001771 {
Tom Herbert42aecaa2015-06-04 09:16:39 -07001772 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1773 .offset = offsetof(struct flow_keys, control),
1774 },
1775 {
Jiri Pirko06635a32015-05-12 14:56:16 +02001776 .key_id = FLOW_DISSECTOR_KEY_BASIC,
1777 .offset = offsetof(struct flow_keys, basic),
1778 },
1779};
1780
1781struct flow_dissector flow_keys_dissector __read_mostly;
1782EXPORT_SYMBOL(flow_keys_dissector);
1783
Paolo Abeni72a338b2018-05-04 11:32:59 +02001784struct flow_dissector flow_keys_basic_dissector __read_mostly;
1785EXPORT_SYMBOL(flow_keys_basic_dissector);
Jiri Pirko06635a32015-05-12 14:56:16 +02001786
1787static int __init init_default_flow_dissectors(void)
1788{
1789 skb_flow_dissector_init(&flow_keys_dissector,
1790 flow_keys_dissector_keys,
1791 ARRAY_SIZE(flow_keys_dissector_keys));
David S. Millereb70db82016-07-01 16:07:50 -04001792 skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1793 flow_keys_dissector_symmetric_keys,
1794 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
Paolo Abeni72a338b2018-05-04 11:32:59 +02001795 skb_flow_dissector_init(&flow_keys_basic_dissector,
1796 flow_keys_basic_dissector_keys,
1797 ARRAY_SIZE(flow_keys_basic_dissector_keys));
Jakub Sitnickib27f7bb2020-05-31 10:28:37 +02001798 return 0;
Jakub Sitnicki5cf65922020-05-21 10:34:35 +02001799}
Eric Dumazetc9b8af12016-11-22 11:17:30 -08001800core_initcall(init_default_flow_dissectors);