// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <net/dst_metadata.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>

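/* Create an IPv4 kernel UDP socket in @net according to @cfg: the socket is
 * bound to cfg->local_ip/local_udp_port (and, when cfg->bind_ifindex is set,
 * to that device), optionally connected to cfg->peer_ip/peer_udp_port, and
 * cfg->use_udp_checksums controls transmit checksumming.  Returns 0 with the
 * socket stored in *sockp, or a negative errno with *sockp set to NULL.
 *
 * Minimal usage sketch (hypothetical driver code, not part of this file;
 * the port value is only an example).  A tunnel driver would normally follow
 * this with setup_udp_tunnel_sock():
 *
 *      struct udp_port_cfg cfg = { .local_udp_port = htons(4789) };
 *      struct socket *sock;
 *      int err = udp_sock_create4(net, &cfg, &sock);
 */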
int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
                     struct socket **sockp)
{
        int err;
        struct socket *sock = NULL;
        struct sockaddr_in udp_addr;

        err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
        if (err < 0)
                goto error;

        if (cfg->bind_ifindex) {
                err = sock_bindtoindex(sock->sk, cfg->bind_ifindex, true);
                if (err < 0)
                        goto error;
        }

        udp_addr.sin_family = AF_INET;
        udp_addr.sin_addr = cfg->local_ip;
        udp_addr.sin_port = cfg->local_udp_port;
        err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
                          sizeof(udp_addr));
        if (err < 0)
                goto error;

        if (cfg->peer_udp_port) {
                udp_addr.sin_family = AF_INET;
                udp_addr.sin_addr = cfg->peer_ip;
                udp_addr.sin_port = cfg->peer_udp_port;
                err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
                                     sizeof(udp_addr), 0);
                if (err < 0)
                        goto error;
        }

        sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;

        *sockp = sock;
        return 0;

error:
        if (sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
                sock_release(sock);
        }
        *sockp = NULL;
        return err;
}
EXPORT_SYMBOL(udp_sock_create4);

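/* Turn an already created UDP socket into a tunnel socket: disable multicast
 * loopback, enable CHECKSUM_UNNECESSARY conversion, attach cfg->sk_user_data
 * and the encap/GRO callbacks from @cfg, and enable UDP encapsulation on the
 * socket.
 */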
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
                           struct udp_tunnel_sock_cfg *cfg)
{
        struct sock *sk = sock->sk;

        /* Disable multicast loopback */
        inet_clear_bit(MC_LOOP, sk);

        /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
        inet_inc_convert_csum(sk);

        rcu_assign_sk_user_data(sk, cfg->sk_user_data);

        udp_sk(sk)->encap_type = cfg->encap_type;
        udp_sk(sk)->encap_rcv = cfg->encap_rcv;
        udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
        udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
        udp_sk(sk)->encap_destroy = cfg->encap_destroy;
        udp_sk(sk)->gro_receive = cfg->gro_receive;
        udp_sk(sk)->gro_complete = cfg->gro_complete;

        udp_tunnel_encap_enable(sk);
}
EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);

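/* Advertise the listening UDP port of a tunnel socket of the given @type to
 * a single device for receive offload via udp_tunnel_nic_add_port().
 */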
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type)
{
        struct sock *sk = sock->sk;
        struct udp_tunnel_info ti;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        udp_tunnel_nic_add_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port);

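/* Withdraw the listening UDP port of a tunnel socket from a single device
 * via udp_tunnel_nic_del_port().
 */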
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
                             unsigned short type)
{
        struct sock *sk = sock->sk;
        struct udp_tunnel_info ti;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        udp_tunnel_nic_del_port(dev, &ti);
}
EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port);

/* Notify netdevs that UDP port started listening */
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct udp_tunnel_info ti;
        struct net_device *dev;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                udp_tunnel_nic_add_port(dev, &ti);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_add_rx_port);

/* Notify netdevs that UDP port is no longer listening */
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct udp_tunnel_info ti;
        struct net_device *dev;

        ti.type = type;
        ti.sa_family = sk->sk_family;
        ti.port = inet_sk(sk)->inet_sport;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                udp_tunnel_nic_del_port(dev, &ti);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(udp_tunnel_notify_del_rx_port);

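/* Push a UDP header in front of the inner packet, fill in the checksum (or
 * skip it when @nocheck is set) and hand the skb to iptunnel_xmit() for IPv4
 * encapsulation over route @rt.
 */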
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                         __be32 src, __be32 dst, __u8 tos, __u8 ttl,
                         __be16 df, __be16 src_port, __be16 dst_port,
                         bool xnet, bool nocheck)
{
        struct udphdr *uh;

        __skb_push(skb, sizeof(*uh));
        skb_reset_transport_header(skb);
        uh = udp_hdr(skb);

        uh->dest = dst_port;
        uh->source = src_port;
        uh->len = htons(skb->len);

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

        udp_set_csum(nocheck, skb, src, dst, skb->len);

        iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
}
EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);

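/* Tear down a tunnel socket: clear sk_user_data and wait for concurrent RCU
 * readers to finish before shutting the socket down and releasing it.
 */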
void udp_tunnel_sock_release(struct socket *sock)
{
        rcu_assign_sk_user_data(sock->sk, NULL);
        synchronize_rcu();
        kernel_sock_shutdown(sock, SHUT_RDWR);
        sock_release(sock);
}
EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);

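/* Build a metadata dst for a received tunnel packet, recording the outer UDP
 * ports and, when an outer checksum was present, the TUNNEL_CSUM flag in the
 * tunnel key.  Returns NULL if the metadata dst cannot be allocated.
 */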
struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
                                    __be16 flags, __be64 tunnel_id, int md_size)
{
        struct metadata_dst *tun_dst;
        struct ip_tunnel_info *info;

        if (family == AF_INET)
                tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
        else
                tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
        if (!tun_dst)
                return NULL;

        info = &tun_dst->u.tun_info;
        info->key.tp_src = udp_hdr(skb)->source;
        info->key.tp_dst = udp_hdr(skb)->dest;
        if (udp_hdr(skb)->check)
                info->key.tun_flags |= TUNNEL_CSUM;
        return tun_dst;
}
EXPORT_SYMBOL_GPL(udp_tun_rx_dst);

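/* Route an outgoing IPv4 tunnel packet described by @key: consult @dst_cache
 * first when one is supplied, otherwise perform a full route lookup, rejecting
 * routes that loop back to the tunnel device itself.  On success the selected
 * source address is returned through @saddr.
 */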
struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
                                     struct net_device *dev,
                                     struct net *net, int oif,
                                     __be32 *saddr,
                                     const struct ip_tunnel_key *key,
                                     __be16 sport, __be16 dport, u8 tos,
                                     struct dst_cache *dst_cache)
{
        struct rtable *rt = NULL;
        struct flowi4 fl4;

#ifdef CONFIG_DST_CACHE
        if (dst_cache) {
                rt = dst_cache_get_ip4(dst_cache, saddr);
                if (rt)
                        return rt;
        }
#endif

        memset(&fl4, 0, sizeof(fl4));
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_proto = IPPROTO_UDP;
        fl4.flowi4_oif = oif;
        fl4.daddr = key->u.ipv4.dst;
        fl4.saddr = key->u.ipv4.src;
        fl4.fl4_dport = dport;
        fl4.fl4_sport = sport;
        fl4.flowi4_tos = RT_TOS(tos);
        fl4.flowi4_flags = key->flow_flags;

        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt)) {
                netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
                return ERR_PTR(-ENETUNREACH);
        }
        if (rt->dst.dev == dev) { /* is this necessary? */
                netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
                ip_rt_put(rt);
                return ERR_PTR(-ELOOP);
        }
#ifdef CONFIG_DST_CACHE
        if (dst_cache)
                dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
#endif
        *saddr = fl4.saddr;
        return rt;
}
EXPORT_SYMBOL_GPL(udp_tunnel_dst_lookup);

MODULE_DESCRIPTION("IPv4 Foo over UDP tunnel driver");
MODULE_LICENSE("GPL");