/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 * This code is derived from kernel vxlan module.
 */
21
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
#include <linux/kernel.h>
24
#include <linux/types.h>
25
#include <linux/module.h>
26
#include <linux/errno.h>
27
#include <linux/slab.h>
28
#include <linux/skbuff.h>
29
#include <linux/rculist.h>
30
#include <linux/netdevice.h>
33
#include <linux/udp.h>
34
#include <linux/igmp.h>
35
#include <linux/etherdevice.h>
36
#include <linux/if_ether.h>
37
#include <linux/if_vlan.h>
38
#include <linux/hash.h>
39
#include <linux/ethtool.h>
41
#include <net/ndisc.h>
43
#include <net/ip_tunnels.h>
46
#include <net/rtnetlink.h>
47
#include <net/route.h>
48
#include <net/dsfield.h>
49
#include <net/inet_ecn.h>
50
#include <net/net_namespace.h>
51
#include <net/netns/generic.h>
52
#include <net/vxlan.h>
59
#define PORT_HASH_BITS 8
60
#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
62
#define VXLAN_N_VID (1u << 24)
63
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
64
/* IP header + UDP + VXLAN + Ethernet header */
65
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
66
#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
68
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
70
/* VXLAN protocol header */
76
static int vxlan_net_id;
78
/* per-network namespace private data for this module */
80
struct hlist_head sock_list[PORT_HASH_SIZE];
81
struct mutex sock_lock; /* RTNL lock nests inside this lock. */
84
/* Socket hash table head */
85
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
87
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
89
return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
92
/* Find VXLAN socket based on network namespace and UDP port */
93
static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
95
struct vxlan_sock *vs;
97
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
98
if (inet_sport(vs->sock->sk) == port)
104
/* Callback from net/ipv4/udp.c to receive packets */
105
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
107
struct vxlan_handler *vh;
108
struct vxlan_sock *vs;
109
struct vxlanhdr *vxh;
111
/* Need Vxlan and inner Ethernet header to be present */
112
if (!pskb_may_pull(skb, VXLAN_HLEN))
115
/* Return packets with reserved bits set */
116
vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
117
if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
118
(vxh->vx_vni & htonl(0xff))) {
119
pr_warn("invalid vxlan flags=%#x vni=%#x\n",
120
ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
124
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
127
vs = vxlan_find_port(sock_net(sk), inet_sport(sk));
131
list_for_each_entry_rcu(vh, &vs->handler_list, node) {
132
if (vh->rcv(vh, skb, vxh->vx_vni) == PACKET_RCVD)
137
/* Consume bad packet */
142
/* Return non vxlan pkt */
146
static void vxlan_sock_put(struct sk_buff *skb)
151
/* On transmit, associate with the tunnel socket */
152
static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
157
skb->destructor = vxlan_sock_put;
160
/* Compute source port for outgoing packet
161
* first choice to use L4 flow hash since it will spread
162
* better and maybe available from hardware
163
* secondary choice is to use jhash on the Ethernet header
165
__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
167
unsigned int range = (port_max - port_min) + 1;
170
hash = skb_get_rxhash(skb);
172
hash = jhash(skb->data, 2 * ETH_ALEN,
173
(__force u32) skb->protocol);
175
return htons((((u64) hash * range) >> 32) + port_min);
178
static void vxlan_gso(struct sk_buff *skb)
180
int udp_offset = skb_transport_offset(skb);
184
uh->len = htons(skb->len - udp_offset);
186
/* csum segment if tunnel sets skb with csum. */
187
if (unlikely(uh->check)) {
188
struct iphdr *iph = ip_hdr(skb);
190
uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
191
skb->len - udp_offset,
193
uh->check = csum_fold(skb_checksum(skb, udp_offset,
194
skb->len - udp_offset, 0));
197
uh->check = CSUM_MANGLED_0;
200
skb->ip_summed = CHECKSUM_NONE;
203
static int handle_offloads(struct sk_buff *skb)
205
if (skb_is_gso(skb)) {
206
OVS_GSO_CB(skb)->fix_segment = vxlan_gso;
208
if (skb->ip_summed != CHECKSUM_PARTIAL)
209
skb->ip_summed = CHECKSUM_NONE;
214
/* Encapsulate @skb in VXLAN/UDP/IP and transmit along @rt.
 *
 * @src/@dst are outer IPv4 addresses; @src_port/@dst_port the outer UDP
 * ports; @vni the network-byte-order VXLAN header VNI word.
 * Returns 0 or a negative errno; the skb is consumed either way.
 * NOTE(review): reconstructed from a corrupted source — error paths
 * inferred; verify against upstream OVS datapath vxlan.c.
 */
int vxlan_xmit_skb(struct net *net, struct vxlan_handler *vh,
		   struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
		   __be16 src_port, __be16 dst_port, __be32 vni)
{
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	int min_headroom;
	int err;

	skb_reset_inner_headers(skb);

	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
			+ VXLAN_HLEN + sizeof(struct iphdr)
			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	if (unlikely(vlan_deaccel_tag(skb)))
		return -ENOMEM;

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = vni;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);
	uh->check = 0;

	vxlan_set_owner(vh->vs->sock->sk, skb);

	err = handle_offloads(skb);
	if (err)
		return err;

	return iptunnel_xmit(net, rt, skb, src, dst,
			     IPPROTO_UDP, tos, ttl, df);
}
262
/* Create a kernel UDP socket bound to @port in @net, hook it up as a
 * VXLAN encapsulation receiver, and insert it into the per-netns hash.
 * Returns the new vxlan_sock or an ERR_PTR.  Caller holds vn->sock_lock.
 * NOTE(review): reconstructed from a corrupted source — error unwinding
 * inferred; verify against upstream OVS datapath vxlan.c.
 */
static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
{
	struct vxlan_sock *vs;
	struct sock *sk;
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = port,
	};
	int rc;

	vs = kmalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		return ERR_PTR(-ENOMEM);

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
	if (rc < 0) {
		pr_debug("UDP socket create failed\n");
		kfree(vs);
		return ERR_PTR(rc);
	}

	/* Put in proper namespace */
	sk = vs->sock->sk;
	sk_change_net(sk, net);

	rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
	if (rc < 0) {
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);
		kfree(vs);
		return ERR_PTR(rc);
	}

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;
	INIT_LIST_HEAD(&vs->handler_list);
	hlist_add_head_rcu(&vs->hlist, vs_head(net, port));

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
	udp_encap_enable();

	return vs;
}
312
static void rcu_free_vs_callback(struct rcu_head *rcu)
314
struct vxlan_sock *vs = container_of(rcu, struct vxlan_sock, rcu);
319
static void vxlan_socket_del(struct vxlan_sock *vs)
321
if (list_empty(&vs->handler_list)) {
322
hlist_del_rcu(&vs->hlist);
324
sk_release_kernel(vs->sock->sk);
325
call_rcu(&vs->rcu, rcu_free_vs_callback);
329
/* Forward declarations: refcounted module-wide init/cleanup used by the
 * handler add/del paths below.
 */
static int vxlan_init_module(void);
static void vxlan_cleanup_module(void);
332
static void rcu_free_vh_callback(struct rcu_head *rcu)
334
struct vxlan_handler *vh = container_of(rcu, struct vxlan_handler, rcu);
339
static void vh_del_work(struct work_struct *work)
341
struct vxlan_handler *vh = container_of(work, struct vxlan_handler, del_work);
342
struct vxlan_sock *vs = vh->vs;
343
struct net *net = sock_net(vs->sock->sk);
344
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
346
mutex_lock(&vn->sock_lock);
348
list_del_rcu(&vh->node);
349
call_rcu(&vh->rcu, rcu_free_vh_callback);
350
vxlan_socket_del(vs);
352
mutex_unlock(&vn->sock_lock);
354
vxlan_cleanup_module();
357
struct vxlan_handler *vxlan_handler_add(struct net *net,
358
__be16 portno, vxlan_rcv_t *rcv,
359
void *data, int priority, bool create)
361
struct vxlan_net *vn;
362
struct vxlan_sock *vs;
363
struct vxlan_handler *vh;
364
struct vxlan_handler *new;
367
err = vxlan_init_module();
371
vn = net_generic(net, vxlan_net_id);
372
mutex_lock(&vn->sock_lock);
373
/* Look to see if can reuse socket */
374
vs = vxlan_find_port(net, portno);
376
vs = vxlan_socket_create(net, portno);
383
/* Try existing vxlan hanlders for this socket. */
384
list_for_each_entry(vh, &vs->handler_list, node) {
385
if (vh->rcv == rcv) {
387
vxlan_socket_del(vs);
388
new = ERR_PTR(-EEXIST);
391
atomic_inc(&vh->refcnt);
397
new = kzalloc(sizeof(*new), GFP_KERNEL);
399
vxlan_socket_del(vs);
400
new = ERR_PTR(-ENOMEM);
406
atomic_set(&new->refcnt, 1);
407
INIT_WORK(&new->del_work, vh_del_work);
409
new->priority = priority;
411
list_for_each_entry(vh, &vs->handler_list, node) {
412
if (vh->priority > priority) {
413
list_add_tail_rcu(&new->node, &vh->node);
418
list_add_tail_rcu(&new->node, &vs->handler_list);
420
mutex_unlock(&vn->sock_lock);
424
void vxlan_handler_put(struct vxlan_handler *vh)
428
if (atomic_dec_and_test(&vh->refcnt))
429
queue_work(&vh->del_work);
432
static __net_init int vxlan_init_net(struct net *net)
434
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
437
mutex_init(&vn->sock_lock);
439
for (h = 0; h < PORT_HASH_SIZE; ++h)
440
INIT_HLIST_HEAD(&vn->sock_list[h]);
445
static struct pernet_operations vxlan_net_ops = {
446
.init = vxlan_init_net,
448
.size = sizeof(struct vxlan_net),
452
static DEFINE_MUTEX(init_lock);
453
DEFINE_COMPAT_PNET_REG_FUNC(device);
455
static int vxlan_init_module(void)
459
mutex_lock(&init_lock);
462
err = register_pernet_device(&vxlan_net_ops);
466
mutex_unlock(&init_lock);
470
static void vxlan_cleanup_module(void)
472
mutex_lock(&init_lock);
476
unregister_pernet_device(&vxlan_net_ops);
478
mutex_unlock(&init_lock);