/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	int			bind_ifindex;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}
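
/*
 * Usage sketch (illustrative only, not an exported helper): a hypothetical
 * tunnel driver opening a kernel-owned IPv4 UDP socket.  The wildcard
 * address and the port number (the IANA-assigned VXLAN port) are example
 * choices, not requirements of this API.
 */
static inline int udp_tunnel_example_open_sock(struct net *net,
					       struct socket **sockp)
{
	struct udp_port_cfg udp_conf = { };

	udp_conf.family = AF_INET;
	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	udp_conf.local_udp_port = htons(4789);	/* example port */
	udp_conf.use_udp_checksums = true;

	/* On success *sockp is a socket owned by the kernel rather than a
	 * process; release it later with udp_tunnel_sock_release().
	 */
	return udp_sock_create(net, &udp_conf, sockp);
}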

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
					     struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
						    struct list_head *head,
						    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by the encap_rcv callback */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8 encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_err_lookup_t encap_err_lookup;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Set up the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);
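
/*
 * Usage sketch (illustrative only): turn a socket obtained from
 * udp_sock_create() into an encapsulation socket.  The receive callback and
 * the private pointer are hypothetical and supplied by the caller; an
 * encap_type of 1 mirrors what existing tunnel drivers such as VXLAN pass
 * (see udp.h for how the udp_sock fields are interpreted).
 */
static inline void udp_tunnel_example_setup(struct net *net,
					    struct socket *sock,
					    udp_tunnel_encap_rcv_t encap_rcv,
					    void *priv)
{
	struct udp_tunnel_sock_cfg tunnel_cfg = { };

	tunnel_cfg.sk_user_data = priv;	/* visible as sk->sk_user_data in the callback */
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = encap_rcv;

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
}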

/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate. The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb. In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload. It should not be necessary to
 * call these functions to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN,		/* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE,		/* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE,	/* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
	unsigned short type;
	sa_family_t sa_family;
	__be16 port;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);

static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}
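
/*
 * Usage sketches (illustrative only).  A tunnel driver can advertise its UDP
 * port to offload-capable devices once its socket is live (and withdraw it
 * with udp_tunnel_notify_del_rx_port() before closing), while a NIC driver
 * typically replays the currently offloaded ports from ndo_open, where RTNL
 * is already held as udp_tunnel_get_rx_info() expects.
 */
static inline void udp_tunnel_example_advertise_port(struct socket *sock)
{
	/* Tunnel-driver side: announce the port behind this socket. */
	udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
}

static inline void udp_tunnel_example_replay_ports(struct net_device *dev)
{
	/* NIC-driver side: ask the stack to push all known ports again. */
	udp_tunnel_get_rx_info(dev);
}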

/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck);

#if IS_ENABLED(CONFIG_IPV6)
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			 struct sk_buff *skb,
			 struct net_device *dev, struct in6_addr *saddr,
			 struct in6_addr *daddr,
			 __u8 prio, __u8 ttl, __be32 label,
			 __be16 src_port, __be16 dst_port, bool nocheck);
#endif
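
/*
 * IPv4 transmit sketch with illustrative values.  It assumes the caller has
 * already routed the packet (rt), called udp_tunnel_handle_offloads() and
 * pushed its own encapsulation header onto the skb; udp_tunnel_xmit_skb()
 * then prepends the outer UDP and IP headers and sends the packet.
 */
static inline void udp_tunnel_example_xmit(struct rtable *rt, struct sock *sk,
					   struct sk_buff *skb,
					   __be32 saddr, __be32 daddr,
					   __be16 src_port, __be16 dst_port)
{
	udp_tunnel_xmit_skb(rt, sk, skb, saddr, daddr,
			    0,				/* tos */
			    ip4_dst_hoplimit(&rt->dst),	/* ttl from the route */
			    htons(IP_DF),		/* don't fragment */
			    src_port, dst_port,
			    false,			/* xnet: same netns */
			    false);			/* nocheck: keep UDP csum */
}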

void udp_tunnel_sock_release(struct socket *sock);

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    __be16 flags, __be64 tunnel_id,
				    int md_size);

#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	return iptunnel_handle_offloads(skb, type);
}
#endif

static inline void udp_tunnel_encap_enable(struct socket *sock)
{
	struct udp_sock *up = udp_sk(sock->sk);

	if (up->encap_enabled)
		return;

	up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (sock->sk->sk_family == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
	else
#endif
		udp_encap_enable();
}

#endif