/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* Kernel module for IP set management */
12
#include <linux/version.h>
13
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
14
#include <linux/config.h>
16
#include <linux/module.h>
17
#include <linux/moduleparam.h>
18
#include <linux/kmod.h>
20
#include <linux/skbuff.h>
21
#include <linux/random.h>
22
#include <linux/jhash.h>
23
#include <linux/netfilter_ipv4/ip_tables.h>
24
#include <linux/errno.h>
25
#include <asm/uaccess.h>
26
#include <asm/bitops.h>
27
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
28
# include <linux/semaphore.h>
30
# include <asm/semaphore.h>
32
#include <linux/spinlock.h>
33
#include <linux/vmalloc.h>
35
/* Lock-held assertion: expands to nothing (assertions compiled out). */
#define ASSERT_READ_LOCK(x)
36
/* Lock-held assertion: expands to nothing (assertions compiled out). */
#define ASSERT_WRITE_LOCK(x)
39
static struct list_head set_type_list; /* all registered sets */
40
static struct ip_set **ip_set_list; /* all individual sets */
41
static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
42
static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
43
static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX; /* capacity of ip_set_list */
44
static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE; /* bucket count of ip_set_hash */
45
static struct list_head *ip_set_hash; /* hash of bindings */
46
static unsigned int ip_set_hash_random; /* random seed */
48
/* True when the two set names compare equal within IP_SET_MAXNAMELEN bytes. */
#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
51
/*
 * Sets are identified either by the index in ip_set_list or by id.
 * The id never changes and is used to find a key in the hash.
 * The index may change by swapping and used at all other places
 * (set/SET netfilter modules, binding value, etc.)
 *
 * Userspace requests are serialized by ip_set_mutex and sets can
 * be deleted only from userspace. Therefore ip_set_list locking
 * must obey the following rules:
 *
 * - kernel requests: read and write locking mandatory
 * - user requests: read locking optional, write locking mandatory
 */
65
__ip_set_get(ip_set_id_t index)
67
atomic_inc(&ip_set_list[index]->ref);
71
__ip_set_put(ip_set_id_t index)
73
atomic_dec(&ip_set_list[index]->ref);
80
static inline struct ip_set_hash *
81
__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
83
struct ip_set_hash *set_hash;
85
list_for_each_entry(set_hash, &ip_set_hash[key], list)
86
if (set_hash->id == id && set_hash->ip == ip)
93
ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
95
u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
96
% ip_set_bindings_hash_size;
97
struct ip_set_hash *set_hash;
99
ASSERT_READ_LOCK(&ip_set_lock);
100
IP_SET_ASSERT(ip_set_list[id]);
101
DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
103
set_hash = __ip_set_find(key, id, ip);
105
DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
107
set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
109
return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
113
__set_hash_del(struct ip_set_hash *set_hash)
115
ASSERT_WRITE_LOCK(&ip_set_lock);
116
IP_SET_ASSERT(ip_set_list[set_hash->binding]);
118
__ip_set_put(set_hash->binding);
119
list_del(&set_hash->list);
124
ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
126
u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
127
% ip_set_bindings_hash_size;
128
struct ip_set_hash *set_hash;
130
IP_SET_ASSERT(ip_set_list[id]);
131
DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
132
write_lock_bh(&ip_set_lock);
133
set_hash = __ip_set_find(key, id, ip);
134
DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
136
set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
138
if (set_hash != NULL)
139
__set_hash_del(set_hash);
140
write_unlock_bh(&ip_set_lock);
145
ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
147
u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
148
% ip_set_bindings_hash_size;
149
struct ip_set_hash *set_hash;
152
IP_SET_ASSERT(ip_set_list[id]);
153
IP_SET_ASSERT(ip_set_list[binding]);
154
DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
155
HIPQUAD(ip), ip_set_list[binding]->name);
156
write_lock_bh(&ip_set_lock);
157
set_hash = __ip_set_find(key, id, ip);
159
set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
164
INIT_LIST_HEAD(&set_hash->list);
167
list_add(&set_hash->list, &ip_set_hash[key]);
169
IP_SET_ASSERT(ip_set_list[set_hash->binding]);
170
DP("overwrite binding: %s",
171
ip_set_list[set_hash->binding]->name);
172
__ip_set_put(set_hash->binding);
174
set_hash->binding = binding;
175
__ip_set_get(set_hash->binding);
176
DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
177
key, id, ip_set_list[id]->name,
178
HIPQUAD(ip), binding, ip_set_list[binding]->name);
180
write_unlock_bh(&ip_set_lock);
184
#define FOREACH_HASH_DO(fn, args...) \
187
struct ip_set_hash *__set_hash; \
189
for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
190
list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
191
fn(__set_hash , ## args); \
195
#define FOREACH_HASH_RW_DO(fn, args...) \
198
struct ip_set_hash *__set_hash, *__n; \
200
ASSERT_WRITE_LOCK(&ip_set_lock); \
201
for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
202
list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
203
fn(__set_hash , ## args); \
207
/* Add, del and test set entries from kernel */
209
#define follow_bindings(index, set, ip) \
210
((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
211
|| (index = (set)->binding) != IP_SET_INVALID_ID)
214
ip_set_testip_kernel(ip_set_id_t index,
215
const struct sk_buff *skb,
216
const u_int32_t *flags)
223
IP_SET_ASSERT(flags[i]);
224
read_lock_bh(&ip_set_lock);
226
set = ip_set_list[index];
228
DP("set %s, index %u", set->name, index);
229
read_lock_bh(&set->lock);
230
res = set->type->testip_kernel(set, skb, &ip, flags, i++);
231
read_unlock_bh(&set->lock);
232
i += !!(set->type->features & IPSET_DATA_DOUBLE);
235
&& follow_bindings(index, set, ip));
236
read_unlock_bh(&ip_set_lock);
242
ip_set_addip_kernel(ip_set_id_t index,
243
const struct sk_buff *skb,
244
const u_int32_t *flags)
251
IP_SET_ASSERT(flags[i]);
253
read_lock_bh(&ip_set_lock);
255
set = ip_set_list[index];
257
DP("set %s, index %u", set->name, index);
258
write_lock_bh(&set->lock);
259
res = set->type->addip_kernel(set, skb, &ip, flags, i++);
260
write_unlock_bh(&set->lock);
261
i += !!(set->type->features & IPSET_DATA_DOUBLE);
262
} while ((res == 0 || res == -EEXIST)
264
&& follow_bindings(index, set, ip));
265
read_unlock_bh(&ip_set_lock);
269
&& (res = set->type->retry(set)) == 0)
274
ip_set_delip_kernel(ip_set_id_t index,
275
const struct sk_buff *skb,
276
const u_int32_t *flags)
283
IP_SET_ASSERT(flags[i]);
284
read_lock_bh(&ip_set_lock);
286
set = ip_set_list[index];
288
DP("set %s, index %u", set->name, index);
289
write_lock_bh(&set->lock);
290
res = set->type->delip_kernel(set, skb, &ip, flags, i++);
291
write_unlock_bh(&set->lock);
292
i += !!(set->type->features & IPSET_DATA_DOUBLE);
293
} while ((res == 0 || res == -EEXIST)
295
&& follow_bindings(index, set, ip));
296
read_unlock_bh(&ip_set_lock);
299
/* Register and deregister settype */
301
static inline struct ip_set_type *
302
find_set_type(const char *name)
304
struct ip_set_type *set_type;
306
list_for_each_entry(set_type, &set_type_list, list)
307
if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
313
ip_set_register_set_type(struct ip_set_type *set_type)
317
if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
318
ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
320
set_type->protocol_version,
321
IP_SET_PROTOCOL_VERSION);
325
write_lock_bh(&ip_set_lock);
326
if (find_set_type(set_type->typename)) {
328
ip_set_printk("'%s' already registered!",
333
if (!try_module_get(THIS_MODULE)) {
337
list_add(&set_type->list, &set_type_list);
338
DP("'%s' registered.", set_type->typename);
340
write_unlock_bh(&ip_set_lock);
345
ip_set_unregister_set_type(struct ip_set_type *set_type)
347
write_lock_bh(&ip_set_lock);
348
if (!find_set_type(set_type->typename)) {
349
ip_set_printk("'%s' not registered?",
353
list_del(&set_type->list);
354
module_put(THIS_MODULE);
355
DP("'%s' unregistered.", set_type->typename);
357
write_unlock_bh(&ip_set_lock);
366
* Find set by name, reference it once. The reference makes sure the
367
* thing pointed to, does not go away under our feet. Drop the reference
368
* later, using ip_set_put().
371
ip_set_get_byname(const char *name)
373
ip_set_id_t i, index = IP_SET_INVALID_ID;
375
down(&ip_set_app_mutex);
376
for (i = 0; i < ip_set_max; i++) {
377
if (ip_set_list[i] != NULL
378
&& SETNAME_EQ(ip_set_list[i]->name, name)) {
384
up(&ip_set_app_mutex);
389
* Find set by index, reference it once. The reference makes sure the
390
* thing pointed to, does not go away under our feet. Drop the reference
391
* later, using ip_set_put().
394
ip_set_get_byindex(ip_set_id_t index)
396
down(&ip_set_app_mutex);
398
if (index >= ip_set_max)
399
return IP_SET_INVALID_ID;
401
if (ip_set_list[index])
404
index = IP_SET_INVALID_ID;
406
up(&ip_set_app_mutex);
411
* If the given set pointer points to a valid set, decrement
412
* reference count by 1. The caller shall not assume the index
413
* to be valid, after calling this function.
415
void ip_set_put(ip_set_id_t index)
417
down(&ip_set_app_mutex);
418
if (ip_set_list[index])
420
up(&ip_set_app_mutex);
423
/* Find a set by name or index */
425
ip_set_find_byname(const char *name)
427
ip_set_id_t i, index = IP_SET_INVALID_ID;
429
for (i = 0; i < ip_set_max; i++) {
430
if (ip_set_list[i] != NULL
431
&& SETNAME_EQ(ip_set_list[i]->name, name)) {
440
ip_set_find_byindex(ip_set_id_t index)
442
if (index >= ip_set_max || ip_set_list[index] == NULL)
443
index = IP_SET_INVALID_ID;
449
* Add, del, test, bind and unbind
453
__ip_set_testip(struct ip_set *set,
460
read_lock_bh(&set->lock);
461
res = set->type->testip(set, data, size, ip);
462
read_unlock_bh(&set->lock);
468
__ip_set_addip(ip_set_id_t index,
472
struct ip_set *set = ip_set_list[index];
478
write_lock_bh(&set->lock);
479
res = set->type->addip(set, data, size, &ip);
480
write_unlock_bh(&set->lock);
481
} while (res == -EAGAIN
483
&& (res = set->type->retry(set)) == 0);
489
ip_set_addip(ip_set_id_t index,
494
return __ip_set_addip(index,
495
data + sizeof(struct ip_set_req_adt),
496
size - sizeof(struct ip_set_req_adt));
500
ip_set_delip(ip_set_id_t index,
504
struct ip_set *set = ip_set_list[index];
509
write_lock_bh(&set->lock);
510
res = set->type->delip(set,
511
data + sizeof(struct ip_set_req_adt),
512
size - sizeof(struct ip_set_req_adt),
514
write_unlock_bh(&set->lock);
520
ip_set_testip(ip_set_id_t index,
524
struct ip_set *set = ip_set_list[index];
529
res = __ip_set_testip(set,
530
data + sizeof(struct ip_set_req_adt),
531
size - sizeof(struct ip_set_req_adt),
534
return (res > 0 ? -EEXIST : res);
538
ip_set_bindip(ip_set_id_t index,
542
struct ip_set *set = ip_set_list[index];
543
const struct ip_set_req_bind *req_bind;
549
if (size < sizeof(struct ip_set_req_bind))
554
if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
555
/* Default binding of a set */
556
const char *binding_name;
558
if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
561
binding_name = data + sizeof(struct ip_set_req_bind);
563
binding = ip_set_find_byname(binding_name);
564
if (binding == IP_SET_INVALID_ID)
567
write_lock_bh(&ip_set_lock);
568
/* Sets as binding values are referenced */
569
if (set->binding != IP_SET_INVALID_ID)
570
__ip_set_put(set->binding);
571
set->binding = binding;
572
__ip_set_get(set->binding);
573
write_unlock_bh(&ip_set_lock);
577
binding = ip_set_find_byname(req_bind->binding);
578
if (binding == IP_SET_INVALID_ID)
581
res = __ip_set_testip(set,
582
data + sizeof(struct ip_set_req_bind),
583
size - sizeof(struct ip_set_req_bind),
585
DP("set %s, ip: %u.%u.%u.%u, binding %s",
586
set->name, HIPQUAD(ip), ip_set_list[binding]->name);
589
res = ip_set_hash_add(set->id, ip, binding);
594
#define FOREACH_SET_DO(fn, args...) \
597
struct ip_set *__set; \
599
for (__i = 0; __i < ip_set_max; __i++) { \
600
__set = ip_set_list[__i]; \
602
fn(__set , ##args); \
607
__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
609
if (set_hash->id == id)
610
__set_hash_del(set_hash);
614
__unbind_default(struct ip_set *set)
616
if (set->binding != IP_SET_INVALID_ID) {
617
/* Sets as binding values are referenced */
618
__ip_set_put(set->binding);
619
set->binding = IP_SET_INVALID_ID;
624
ip_set_unbindip(ip_set_id_t index,
629
const struct ip_set_req_bind *req_bind;
634
if (size < sizeof(struct ip_set_req_bind))
639
DP("%u %s", index, req_bind->binding);
640
if (index == IP_SET_INVALID_ID) {
642
if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
643
/* Default binding of sets */
644
write_lock_bh(&ip_set_lock);
645
FOREACH_SET_DO(__unbind_default);
646
write_unlock_bh(&ip_set_lock);
648
} else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
649
/* Flush all bindings of all sets*/
650
write_lock_bh(&ip_set_lock);
651
FOREACH_HASH_RW_DO(__set_hash_del);
652
write_unlock_bh(&ip_set_lock);
655
DP("unreachable reached!");
659
set = ip_set_list[index];
661
if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
662
/* Default binding of set */
663
ip_set_id_t binding = ip_set_find_byindex(set->binding);
665
if (binding == IP_SET_INVALID_ID)
668
write_lock_bh(&ip_set_lock);
669
/* Sets in hash values are referenced */
670
__ip_set_put(set->binding);
671
set->binding = IP_SET_INVALID_ID;
672
write_unlock_bh(&ip_set_lock);
675
} else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
676
/* Flush all bindings */
678
write_lock_bh(&ip_set_lock);
679
FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
680
write_unlock_bh(&ip_set_lock);
684
res = __ip_set_testip(set,
685
data + sizeof(struct ip_set_req_bind),
686
size - sizeof(struct ip_set_req_bind),
689
DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
691
res = ip_set_hash_del(set->id, ip);
697
ip_set_testbind(ip_set_id_t index,
701
struct ip_set *set = ip_set_list[index];
702
const struct ip_set_req_bind *req_bind;
708
if (size < sizeof(struct ip_set_req_bind))
713
if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
714
/* Default binding of set */
715
const char *binding_name;
717
if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
720
binding_name = data + sizeof(struct ip_set_req_bind);
722
binding = ip_set_find_byname(binding_name);
723
if (binding == IP_SET_INVALID_ID)
726
res = (set->binding == binding) ? -EEXIST : 0;
730
binding = ip_set_find_byname(req_bind->binding);
731
if (binding == IP_SET_INVALID_ID)
735
res = __ip_set_testip(set,
736
data + sizeof(struct ip_set_req_bind),
737
size - sizeof(struct ip_set_req_bind),
739
DP("set %s, ip: %u.%u.%u.%u, binding %s",
740
set->name, HIPQUAD(ip), ip_set_list[binding]->name);
743
res = (ip_set_find_in_hash(set->id, ip) == binding)
749
static struct ip_set_type *
750
find_set_type_rlock(const char *typename)
752
struct ip_set_type *type;
754
read_lock_bh(&ip_set_lock);
755
type = find_set_type(typename);
757
read_unlock_bh(&ip_set_lock);
763
find_free_id(const char *name,
769
*id = IP_SET_INVALID_ID;
770
for (i = 0; i < ip_set_max; i++) {
771
if (ip_set_list[i] == NULL) {
772
if (*id == IP_SET_INVALID_ID)
774
} else if (SETNAME_EQ(name, ip_set_list[i]->name))
778
if (*id == IP_SET_INVALID_ID)
779
/* No free slot remained */
781
/* Check that index is usable as id (swapping) */
783
for (i = 0; i < ip_set_max; i++) {
784
if (ip_set_list[i] != NULL
785
&& ip_set_list[i]->id == *id) {
797
ip_set_create(const char *name,
798
const char *typename,
804
ip_set_id_t index = 0, id;
807
DP("setname: %s, typename: %s, id: %u", name, typename, restore);
809
* First, and without any locks, allocate and initialize
810
* a normal base set structure.
812
set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
815
set->lock = RW_LOCK_UNLOCKED;
816
strncpy(set->name, name, IP_SET_MAXNAMELEN);
817
set->binding = IP_SET_INVALID_ID;
818
atomic_set(&set->ref, 0);
821
* Next, take the &ip_set_lock, check that we know the type,
822
* and take a reference on the type, to make sure it
823
* stays available while constructing our new set.
825
* After referencing the type, we drop the &ip_set_lock,
826
* and let the new set construction run without locks.
828
set->type = find_set_type_rlock(typename);
829
if (set->type == NULL) {
830
/* Try loading the module */
831
char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
832
strcpy(modulename, "ip_set_");
833
strcat(modulename, typename);
834
DP("try to load %s", modulename);
835
request_module(modulename);
836
set->type = find_set_type_rlock(typename);
838
if (set->type == NULL) {
839
ip_set_printk("no set type '%s', set '%s' not created",
844
if (!try_module_get(set->type->me)) {
845
read_unlock_bh(&ip_set_lock);
849
read_unlock_bh(&ip_set_lock);
852
* Without holding any locks, create private part.
854
res = set->type->create(set, data, size);
858
/* BTW, res==0 here. */
861
* Here, we have a valid, constructed set. &ip_set_lock again,
862
* find free id/index and check that it is not already in
865
write_lock_bh(&ip_set_lock);
866
if ((res = find_free_id(set->name, &index, &id)) != 0) {
871
/* Make sure restore gets the same index */
872
if (restore != IP_SET_INVALID_ID && index != restore) {
873
DP("Can't restore, sets are screwed up");
879
* Finally! Add our shiny new set to the list, and be done.
881
DP("create: '%s' created with index %u, id %u!", set->name, index, id);
883
ip_set_list[index] = set;
884
write_unlock_bh(&ip_set_lock);
888
write_unlock_bh(&ip_set_lock);
889
set->type->destroy(set);
891
module_put(set->type->me);
898
* Destroy a given existing set
901
ip_set_destroy_set(ip_set_id_t index)
903
struct ip_set *set = ip_set_list[index];
906
DP("set: %s", set->name);
907
write_lock_bh(&ip_set_lock);
908
FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
909
if (set->binding != IP_SET_INVALID_ID)
910
__ip_set_put(set->binding);
911
ip_set_list[index] = NULL;
912
write_unlock_bh(&ip_set_lock);
914
/* Must call it without holding any lock */
915
set->type->destroy(set);
916
module_put(set->type->me);
921
* Destroy a set - or all sets
922
* Sets must not be referenced/used.
925
ip_set_destroy(ip_set_id_t index)
929
/* ref modification always protected by the mutex */
930
if (index != IP_SET_INVALID_ID) {
931
if (atomic_read(&ip_set_list[index]->ref))
933
ip_set_destroy_set(index);
935
for (i = 0; i < ip_set_max; i++) {
936
if (ip_set_list[i] != NULL
937
&& (atomic_read(&ip_set_list[i]->ref)))
941
for (i = 0; i < ip_set_max; i++) {
942
if (ip_set_list[i] != NULL)
943
ip_set_destroy_set(i);
950
ip_set_flush_set(struct ip_set *set)
952
DP("set: %s %u", set->name, set->id);
954
write_lock_bh(&set->lock);
955
set->type->flush(set);
956
write_unlock_bh(&set->lock);
960
* Flush data in a set - or in all sets
963
ip_set_flush(ip_set_id_t index)
965
if (index != IP_SET_INVALID_ID) {
966
IP_SET_ASSERT(ip_set_list[index]);
967
ip_set_flush_set(ip_set_list[index]);
969
FOREACH_SET_DO(ip_set_flush_set);
976
ip_set_rename(ip_set_id_t index, const char *name)
978
struct ip_set *set = ip_set_list[index];
982
DP("set: %s to %s", set->name, name);
983
write_lock_bh(&ip_set_lock);
984
for (i = 0; i < ip_set_max; i++) {
985
if (ip_set_list[i] != NULL
986
&& SETNAME_EQ(ip_set_list[i]->name, name)) {
991
strncpy(set->name, name, IP_SET_MAXNAMELEN);
993
write_unlock_bh(&ip_set_lock);
998
* Swap two sets so that name/index points to the other.
999
* References are also swapped.
1002
ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
1004
struct ip_set *from = ip_set_list[from_index];
1005
struct ip_set *to = ip_set_list[to_index];
1006
char from_name[IP_SET_MAXNAMELEN];
1009
DP("set: %s to %s", from->name, to->name);
1010
/* Features must not change. Artifical restriction. */
1011
if (from->type->features != to->type->features)
1014
/* No magic here: ref munging protected by the mutex */
1015
write_lock_bh(&ip_set_lock);
1016
strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
1017
from_ref = atomic_read(&from->ref);
1019
strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
1020
atomic_set(&from->ref, atomic_read(&to->ref));
1021
strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
1022
atomic_set(&to->ref, from_ref);
1024
ip_set_list[from_index] = to;
1025
ip_set_list[to_index] = from;
1027
write_unlock_bh(&ip_set_lock);
1036
__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
1037
ip_set_id_t id, size_t *size)
1039
if (set_hash->id == id)
1040
*size += sizeof(struct ip_set_hash_list);
1044
__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
1045
ip_set_id_t id, size_t *size)
1047
if (set_hash->id == id)
1048
*size += sizeof(struct ip_set_hash_save);
1052
__set_hash_bindings(struct ip_set_hash *set_hash,
1053
ip_set_id_t id, void *data, int *used)
1055
if (set_hash->id == id) {
1056
struct ip_set_hash_list *hash_list = data + *used;
1058
hash_list->ip = set_hash->ip;
1059
hash_list->binding = set_hash->binding;
1060
*used += sizeof(struct ip_set_hash_list);
1064
static int ip_set_list_set(ip_set_id_t index,
1069
struct ip_set *set = ip_set_list[index];
1070
struct ip_set_list *set_list;
1072
/* Pointer to our header */
1073
set_list = data + *used;
1075
DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
1077
/* Get and ensure header size */
1078
if (*used + sizeof(struct ip_set_list) > len)
1079
goto not_enough_mem;
1080
*used += sizeof(struct ip_set_list);
1082
read_lock_bh(&set->lock);
1083
/* Get and ensure set specific header size */
1084
set_list->header_size = set->type->header_size;
1085
if (*used + set_list->header_size > len)
1088
/* Fill in the header */
1089
set_list->index = index;
1090
set_list->binding = set->binding;
1091
set_list->ref = atomic_read(&set->ref);
1093
/* Fill in set spefific header data */
1094
set->type->list_header(set, data + *used);
1095
*used += set_list->header_size;
1097
/* Get and ensure set specific members size */
1098
set_list->members_size = set->type->list_members_size(set);
1099
if (*used + set_list->members_size > len)
1102
/* Fill in set spefific members data */
1103
set->type->list_members(set, data + *used);
1104
*used += set_list->members_size;
1105
read_unlock_bh(&set->lock);
1109
/* Get and ensure set specific bindings size */
1110
set_list->bindings_size = 0;
1111
FOREACH_HASH_DO(__set_hash_bindings_size_list,
1112
set->id, &set_list->bindings_size);
1113
if (*used + set_list->bindings_size > len)
1114
goto not_enough_mem;
1116
/* Fill in set spefific bindings data */
1117
FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
1122
read_unlock_bh(&set->lock);
1124
DP("not enough mem, try again");
1131
static int ip_set_save_set(ip_set_id_t index,
1137
struct ip_set_save *set_save;
1139
/* Pointer to our header */
1140
set_save = data + *used;
1142
/* Get and ensure header size */
1143
if (*used + sizeof(struct ip_set_save) > len)
1144
goto not_enough_mem;
1145
*used += sizeof(struct ip_set_save);
1147
set = ip_set_list[index];
1148
DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
1149
data, data + *used);
1151
read_lock_bh(&set->lock);
1152
/* Get and ensure set specific header size */
1153
set_save->header_size = set->type->header_size;
1154
if (*used + set_save->header_size > len)
1157
/* Fill in the header */
1158
set_save->index = index;
1159
set_save->binding = set->binding;
1161
/* Fill in set spefific header data */
1162
set->type->list_header(set, data + *used);
1163
*used += set_save->header_size;
1165
DP("set header filled: %s, used: %u(%u) %p %p", set->name, *used,
1166
set_save->header_size, data, data + *used);
1167
/* Get and ensure set specific members size */
1168
set_save->members_size = set->type->list_members_size(set);
1169
if (*used + set_save->members_size > len)
1172
/* Fill in set spefific members data */
1173
set->type->list_members(set, data + *used);
1174
*used += set_save->members_size;
1175
read_unlock_bh(&set->lock);
1176
DP("set members filled: %s, used: %u(%u) %p %p", set->name, *used,
1177
set_save->members_size, data, data + *used);
1181
read_unlock_bh(&set->lock);
1183
DP("not enough mem, try again");
1188
__set_hash_save_bindings(struct ip_set_hash *set_hash,
1196
&& (id == IP_SET_INVALID_ID || set_hash->id == id)) {
1197
struct ip_set_hash_save *hash_save = data + *used;
1198
/* Ensure bindings size */
1199
if (*used + sizeof(struct ip_set_hash_save) > len) {
1203
hash_save->id = set_hash->id;
1204
hash_save->ip = set_hash->ip;
1205
hash_save->binding = set_hash->binding;
1206
*used += sizeof(struct ip_set_hash_save);
1210
static int ip_set_save_bindings(ip_set_id_t index,
1216
struct ip_set_save *set_save;
1218
DP("used %u, len %u", *used, len);
1219
/* Get and ensure header size */
1220
if (*used + sizeof(struct ip_set_save) > len)
1224
set_save = data + *used;
1225
set_save->index = IP_SET_INVALID_ID;
1226
set_save->header_size = 0;
1227
set_save->members_size = 0;
1228
*used += sizeof(struct ip_set_save);
1230
DP("marker added used %u, len %u", *used, len);
1231
/* Fill in bindings data */
1232
if (index != IP_SET_INVALID_ID)
1233
/* Sets are identified by id in hash */
1234
index = ip_set_list[index]->id;
1235
FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
1243
static int ip_set_restore(void *data,
1247
int line = 0, used = 0, members_size;
1249
struct ip_set_hash_save *hash_save;
1250
struct ip_set_restore *set_restore;
1253
/* Loop to restore sets */
1257
DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
1258
/* Get and ensure header size */
1259
if (used + sizeof(struct ip_set_restore) > len)
1261
set_restore = data + used;
1262
used += sizeof(struct ip_set_restore);
1264
/* Ensure data size */
1266
+ set_restore->header_size
1267
+ set_restore->members_size > len)
1271
if (set_restore->index == IP_SET_INVALID_ID) {
1276
/* Try to create the set */
1277
DP("restore %s %s", set_restore->name, set_restore->typename);
1278
res = ip_set_create(set_restore->name,
1279
set_restore->typename,
1282
set_restore->header_size);
1286
used += set_restore->header_size;
1288
index = ip_set_find_byindex(set_restore->index);
1289
DP("index %u, restore_index %u", index, set_restore->index);
1290
if (index != set_restore->index)
1292
/* Try to restore members data */
1293
set = ip_set_list[index];
1295
DP("members_size %u reqsize %u",
1296
set_restore->members_size, set->type->reqsize);
1297
while (members_size + set->type->reqsize <=
1298
set_restore->members_size) {
1300
DP("members: %u, line %u", members_size, line);
1301
res = __ip_set_addip(index,
1302
data + used + members_size,
1303
set->type->reqsize);
1304
if (!(res == 0 || res == -EEXIST))
1306
members_size += set->type->reqsize;
1309
DP("members_size %u %u",
1310
set_restore->members_size, members_size);
1311
if (members_size != set_restore->members_size)
1313
used += set_restore->members_size;
1317
/* Loop to restore bindings */
1318
while (used < len) {
1321
DP("restore binding, line %u", line);
1322
/* Get and ensure size */
1323
if (used + sizeof(struct ip_set_hash_save) > len)
1325
hash_save = data + used;
1326
used += sizeof(struct ip_set_hash_save);
1328
/* hash_save->id is used to store the index */
1329
index = ip_set_find_byindex(hash_save->id);
1330
DP("restore binding index %u, id %u, %u -> %u",
1331
index, hash_save->id, hash_save->ip, hash_save->binding);
1332
if (index != hash_save->id)
1334
if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
1335
DP("corrupt binding set index %u", hash_save->binding);
1338
set = ip_set_list[hash_save->id];
1339
/* Null valued IP means default binding */
1341
res = ip_set_hash_add(set->id,
1343
hash_save->binding);
1345
IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
1346
write_lock_bh(&ip_set_lock);
1347
set->binding = hash_save->binding;
1348
__ip_set_get(set->binding);
1349
write_unlock_bh(&ip_set_lock);
1350
DP("default binding: %u", set->binding);
1362
ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
1365
int res = 0; /* Assume OK */
1367
struct ip_set_req_adt *req_adt;
1368
ip_set_id_t index = IP_SET_INVALID_ID;
1369
int (*adtfn)(ip_set_id_t index,
1370
const void *data, size_t size);
1372
int (*fn)(ip_set_id_t index,
1373
const void *data, size_t size);
1375
{ { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
1376
{ ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
1379
DP("optval=%d, user=%p, len=%d", optval, user, len);
1380
if (!capable(CAP_NET_ADMIN))
1382
if (optval != SO_IP_SET)
1384
if (len <= sizeof(unsigned)) {
1385
ip_set_printk("short userdata (want >%zu, got %u)",
1386
sizeof(unsigned), len);
1389
data = vmalloc(len);
1391
DP("out of mem for %u bytes", len);
1394
if (copy_from_user(data, user, len) != 0) {
1398
if (down_interruptible(&ip_set_app_mutex)) {
1403
op = (unsigned *)data;
1406
if (*op < IP_SET_OP_VERSION) {
1407
/* Check the version at the beginning of operations */
1408
struct ip_set_req_version *req_version = data;
1409
if (req_version->version != IP_SET_PROTOCOL_VERSION) {
1416
case IP_SET_OP_CREATE:{
1417
struct ip_set_req_create *req_create = data;
1419
if (len < sizeof(struct ip_set_req_create)) {
1420
ip_set_printk("short CREATE data (want >=%zu, got %u)",
1421
sizeof(struct ip_set_req_create), len);
1425
req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
1426
req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1427
res = ip_set_create(req_create->name,
1428
req_create->typename,
1430
data + sizeof(struct ip_set_req_create),
1431
len - sizeof(struct ip_set_req_create));
1434
case IP_SET_OP_DESTROY:{
1435
struct ip_set_req_std *req_destroy = data;
1437
if (len != sizeof(struct ip_set_req_std)) {
1438
ip_set_printk("invalid DESTROY data (want %zu, got %u)",
1439
sizeof(struct ip_set_req_std), len);
1443
if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
1444
/* Destroy all sets */
1445
index = IP_SET_INVALID_ID;
1447
req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
1448
index = ip_set_find_byname(req_destroy->name);
1450
if (index == IP_SET_INVALID_ID) {
1456
res = ip_set_destroy(index);
1459
case IP_SET_OP_FLUSH:{
1460
struct ip_set_req_std *req_flush = data;
1462
if (len != sizeof(struct ip_set_req_std)) {
1463
ip_set_printk("invalid FLUSH data (want %zu, got %u)",
1464
sizeof(struct ip_set_req_std), len);
1468
if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
1469
/* Flush all sets */
1470
index = IP_SET_INVALID_ID;
1472
req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
1473
index = ip_set_find_byname(req_flush->name);
1475
if (index == IP_SET_INVALID_ID) {
1480
res = ip_set_flush(index);
1483
case IP_SET_OP_RENAME:{
1484
struct ip_set_req_create *req_rename = data;
1486
if (len != sizeof(struct ip_set_req_create)) {
1487
ip_set_printk("invalid RENAME data (want %zu, got %u)",
1488
sizeof(struct ip_set_req_create), len);
1493
req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
1494
req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1496
index = ip_set_find_byname(req_rename->name);
1497
if (index == IP_SET_INVALID_ID) {
1501
res = ip_set_rename(index, req_rename->typename);
1504
case IP_SET_OP_SWAP:{
1505
struct ip_set_req_create *req_swap = data;
1506
ip_set_id_t to_index;
1508
if (len != sizeof(struct ip_set_req_create)) {
1509
ip_set_printk("invalid SWAP data (want %zu, got %u)",
1510
sizeof(struct ip_set_req_create), len);
1515
req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
1516
req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
1518
index = ip_set_find_byname(req_swap->name);
1519
if (index == IP_SET_INVALID_ID) {
1523
to_index = ip_set_find_byname(req_swap->typename);
1524
if (to_index == IP_SET_INVALID_ID) {
1528
res = ip_set_swap(index, to_index);
1532
break; /* Set identified by id */
1535
/* There we may have add/del/test/bind/unbind/test_bind operations */
1536
if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
1540
adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
1542
if (len < sizeof(struct ip_set_req_adt)) {
1543
ip_set_printk("short data in adt request (want >=%zu, got %u)",
1544
sizeof(struct ip_set_req_adt), len);
1550
/* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
1551
if (!(*op == IP_SET_OP_UNBIND_SET
1552
&& req_adt->index == IP_SET_INVALID_ID)) {
1553
index = ip_set_find_byindex(req_adt->index);
1554
if (index == IP_SET_INVALID_ID) {
1559
res = adtfn(index, data, len);
1562
up(&ip_set_app_mutex);
1566
DP("final result %d", res);
1571
ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
1575
ip_set_id_t index = IP_SET_INVALID_ID;
1579
DP("optval=%d, user=%p, len=%d", optval, user, *len);
1580
if (!capable(CAP_NET_ADMIN))
1582
if (optval != SO_IP_SET)
1584
if (*len < sizeof(unsigned)) {
1585
ip_set_printk("short userdata (want >=%zu, got %d)",
1586
sizeof(unsigned), *len);
1589
data = vmalloc(*len);
1591
DP("out of mem for %d bytes", *len);
1594
if (copy_from_user(data, user, *len) != 0) {
1598
if (down_interruptible(&ip_set_app_mutex)) {
1603
op = (unsigned *) data;
1606
if (*op < IP_SET_OP_VERSION) {
1607
/* Check the version at the beginning of operations */
1608
struct ip_set_req_version *req_version = data;
1609
if (req_version->version != IP_SET_PROTOCOL_VERSION) {
1616
case IP_SET_OP_VERSION: {
1617
struct ip_set_req_version *req_version = data;
1619
if (*len != sizeof(struct ip_set_req_version)) {
1620
ip_set_printk("invalid VERSION (want %zu, got %d)",
1621
sizeof(struct ip_set_req_version),
1627
req_version->version = IP_SET_PROTOCOL_VERSION;
1628
res = copy_to_user(user, req_version,
1629
sizeof(struct ip_set_req_version));
1632
case IP_SET_OP_GET_BYNAME: {
1633
struct ip_set_req_get_set *req_get = data;
1635
if (*len != sizeof(struct ip_set_req_get_set)) {
1636
ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
1637
sizeof(struct ip_set_req_get_set), *len);
1641
req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1642
index = ip_set_find_byname(req_get->set.name);
1643
req_get->set.index = index;
1646
case IP_SET_OP_GET_BYINDEX: {
1647
struct ip_set_req_get_set *req_get = data;
1649
if (*len != sizeof(struct ip_set_req_get_set)) {
1650
ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
1651
sizeof(struct ip_set_req_get_set), *len);
1655
req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1656
index = ip_set_find_byindex(req_get->set.index);
1657
strncpy(req_get->set.name,
1658
index == IP_SET_INVALID_ID ? ""
1659
: ip_set_list[index]->name, IP_SET_MAXNAMELEN);
1662
case IP_SET_OP_ADT_GET: {
1663
struct ip_set_req_adt_get *req_get = data;
1665
if (*len != sizeof(struct ip_set_req_adt_get)) {
1666
ip_set_printk("invalid ADT_GET (want %zu, got %d)",
1667
sizeof(struct ip_set_req_adt_get), *len);
1671
req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1672
index = ip_set_find_byname(req_get->set.name);
1673
if (index != IP_SET_INVALID_ID) {
1674
req_get->set.index = index;
1675
strncpy(req_get->typename,
1676
ip_set_list[index]->type->typename,
1677
IP_SET_MAXNAMELEN - 1);
1684
case IP_SET_OP_MAX_SETS: {
1685
struct ip_set_req_max_sets *req_max_sets = data;
1688
if (*len != sizeof(struct ip_set_req_max_sets)) {
1689
ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
1690
sizeof(struct ip_set_req_max_sets), *len);
1695
if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
1696
req_max_sets->set.index = IP_SET_INVALID_ID;
1698
req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
1699
req_max_sets->set.index =
1700
ip_set_find_byname(req_max_sets->set.name);
1701
if (req_max_sets->set.index == IP_SET_INVALID_ID) {
1706
req_max_sets->max_sets = ip_set_max;
1707
req_max_sets->sets = 0;
1708
for (i = 0; i < ip_set_max; i++) {
1709
if (ip_set_list[i] != NULL)
1710
req_max_sets->sets++;
1714
case IP_SET_OP_LIST_SIZE:
1715
case IP_SET_OP_SAVE_SIZE: {
1716
struct ip_set_req_setnames *req_setnames = data;
1717
struct ip_set_name_list *name_list;
1722
if (*len < sizeof(struct ip_set_req_setnames)) {
1723
ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
1724
sizeof(struct ip_set_req_setnames), *len);
1729
req_setnames->size = 0;
1730
used = sizeof(struct ip_set_req_setnames);
1731
for (i = 0; i < ip_set_max; i++) {
1732
if (ip_set_list[i] == NULL)
1734
name_list = data + used;
1735
used += sizeof(struct ip_set_name_list);
1736
if (used > copylen) {
1740
set = ip_set_list[i];
1741
/* Fill in index, name, etc. */
1742
name_list->index = i;
1743
name_list->id = set->id;
1744
strncpy(name_list->name,
1746
IP_SET_MAXNAMELEN - 1);
1747
strncpy(name_list->typename,
1748
set->type->typename,
1749
IP_SET_MAXNAMELEN - 1);
1750
DP("filled %s of type %s, index %u\n",
1751
name_list->name, name_list->typename,
1753
if (!(req_setnames->index == IP_SET_INVALID_ID
1754
|| req_setnames->index == i))
1758
case IP_SET_OP_LIST_SIZE: {
1759
req_setnames->size += sizeof(struct ip_set_list)
1760
+ set->type->header_size
1761
+ set->type->list_members_size(set);
1762
/* Sets are identified by id in the hash */
1763
FOREACH_HASH_DO(__set_hash_bindings_size_list,
1764
set->id, &req_setnames->size);
1767
case IP_SET_OP_SAVE_SIZE: {
1768
req_setnames->size += sizeof(struct ip_set_save)
1769
+ set->type->header_size
1770
+ set->type->list_members_size(set);
1771
FOREACH_HASH_DO(__set_hash_bindings_size_save,
1772
set->id, &req_setnames->size);
1779
if (copylen != used) {
1785
case IP_SET_OP_LIST: {
1786
struct ip_set_req_list *req_list = data;
1790
if (*len < sizeof(struct ip_set_req_list)) {
1791
ip_set_printk("short LIST (want >=%zu, got %d)",
1792
sizeof(struct ip_set_req_list), *len);
1796
index = req_list->index;
1797
if (index != IP_SET_INVALID_ID
1798
&& ip_set_find_byindex(index) != index) {
1803
if (index == IP_SET_INVALID_ID) {
1805
for (i = 0; i < ip_set_max && res == 0; i++) {
1806
if (ip_set_list[i] != NULL)
1807
res = ip_set_list_set(i, data, &used, *len);
1810
/* List an individual set */
1811
res = ip_set_list_set(index, data, &used, *len);
1815
else if (copylen != used) {
1821
case IP_SET_OP_SAVE: {
1822
struct ip_set_req_list *req_save = data;
1826
if (*len < sizeof(struct ip_set_req_list)) {
1827
ip_set_printk("short SAVE (want >=%zu, got %d)",
1828
sizeof(struct ip_set_req_list), *len);
1832
index = req_save->index;
1833
if (index != IP_SET_INVALID_ID
1834
&& ip_set_find_byindex(index) != index) {
1839
if (index == IP_SET_INVALID_ID) {
1841
for (i = 0; i < ip_set_max && res == 0; i++) {
1842
if (ip_set_list[i] != NULL)
1843
res = ip_set_save_set(i, data, &used, *len);
1846
/* Save an individual set */
1847
res = ip_set_save_set(index, data, &used, *len);
1850
res = ip_set_save_bindings(index, data, &used, *len);
1854
else if (copylen != used) {
1860
case IP_SET_OP_RESTORE: {
1861
struct ip_set_req_setnames *req_restore = data;
1864
if (*len < sizeof(struct ip_set_req_setnames)
1865
|| *len != req_restore->size) {
1866
ip_set_printk("invalid RESTORE (want =%zu, got %d)",
1867
req_restore->size, *len);
1871
line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
1872
req_restore->size - sizeof(struct ip_set_req_setnames));
1873
DP("ip_set_restore: %u", line);
1876
req_restore->size = line;
1877
copylen = sizeof(struct ip_set_req_setnames);
1885
} /* end of switch(op) */
1888
DP("set %s, copylen %u", index != IP_SET_INVALID_ID
1889
&& ip_set_list[index]
1890
? ip_set_list[index]->name
1891
: ":all:", copylen);
1892
res = copy_to_user(user, data, copylen);
1895
up(&ip_set_app_mutex);
1899
DP("final result %d", res);
1903
/* NOTE(review): this listing is interleaved with bare numeric lines
 * (left-over line numbers from an extraction artifact); they are kept
 * verbatim below.  The #else/#endif matching the #if near the end and
 * the closing "};" of this initializer fall in a lost gap — restore
 * them from the pristine source before compiling. */
/* Netfilter {get,set}sockopt registration: both directions claim the
 * single SO_IP_SET option number ([optmin, optmax) is one wide) and
 * dispatch to the ip_set_sockfn_set/ip_set_sockfn_get handlers. */
static struct nf_sockopt_ops so_set = {
1905
.set_optmin = SO_IP_SET,
1906
.set_optmax = SO_IP_SET + 1,
1907
.set = &ip_set_sockfn_set,
1908
.get_optmin = SO_IP_SET,
1909
.get_optmax = SO_IP_SET + 1,
1910
.get = &ip_set_sockfn_get,
1911
/* .owner existed only on older kernels; the #if below has no visible
 * closer here (lost in the gap after this line). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1914
.owner = THIS_MODULE,
1918
/* NOTE(review): bare numeric lines below are extraction artifacts
 * (original listing line numbers), preserved verbatim. */
/* Load-time tunables.  Defaults are 0; ip_set_init copies them over
 * the compile-time CONFIG_IP_NF_SET_MAX / CONFIG_IP_NF_SET_HASHSIZE
 * values — presumably only when non-zero, but the guarding check sits
 * in a lost line of ip_set_init; TODO confirm against pristine source. */
static int max_sets, hash_size;
1919
module_param(max_sets, int, 0600);
1920
MODULE_PARM_DESC(max_sets, "maximal number of sets");
1921
module_param(hash_size, int, 0600);
1922
MODULE_PARM_DESC(hash_size, "hash size for bindings");
1923
MODULE_LICENSE("GPL");
1924
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
1925
MODULE_DESCRIPTION("module implementing core IP set support");
1927
/* Module init: allocate the global set array and the bindings hash,
 * then register the SO_IP_SET sockopt interface.
 * NOTE(review): the braces, the allocation-failure gotos/returns and
 * the success return are in lines lost from this listing (bare
 * numeric lines are extraction artifacts, kept verbatim); only the
 * happy-path statements are visible below. */
static int __init ip_set_init(void)
1932
/* Seed for the bindings-hash (ip_set_hash_random, see file top). */
get_random_bytes(&ip_set_hash_random, 4);
1934
/* Module parameter overrides the compile-time maximum — presumably
 * guarded by `if (max_sets)` in a lost line; TODO confirm. */
ip_set_max = max_sets;
1935
ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
1937
printk(KERN_ERR "Unable to create ip_set_list\n");
1940
/* All slots start empty: NULL means "no set at this index". */
memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
1942
ip_set_bindings_hash_size = hash_size;
1943
ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
1945
printk(KERN_ERR "Unable to create ip_set_hash\n");
1949
for (i = 0; i < ip_set_bindings_hash_size; i++)
1950
INIT_LIST_HEAD(&ip_set_hash[i]);
1952
INIT_LIST_HEAD(&set_type_list);
1954
res = nf_register_sockopt(&so_set);
1956
ip_set_printk("SO_SET registry failed: %d", res);
1964
/* Module teardown: unregister the sockopt interface.  The freeing of
 * ip_set_list / ip_set_hash (if any) falls in lines lost from this
 * listing — TODO confirm against pristine source.  Bare numeric lines
 * are extraction artifacts, kept verbatim. */
static void __exit ip_set_fini(void)
1966
/* There can't be any existing set or binding */
1967
nf_unregister_sockopt(&so_set);
1970
DP("these are the famous last words");
1973
/* Kernel-side API exported to the individual set-type modules and to
 * the set/SET netfilter match/target modules (see the locking comment
 * at the top of the file).  Bare numeric lines are extraction
 * artifacts, kept verbatim. */
EXPORT_SYMBOL(ip_set_register_set_type);
1974
EXPORT_SYMBOL(ip_set_unregister_set_type);
1976
EXPORT_SYMBOL(ip_set_get_byname);
1977
EXPORT_SYMBOL(ip_set_get_byindex);
1978
EXPORT_SYMBOL(ip_set_put);
1980
EXPORT_SYMBOL(ip_set_addip_kernel);
1981
EXPORT_SYMBOL(ip_set_delip_kernel);
1982
EXPORT_SYMBOL(ip_set_testip_kernel);
1984
/* Wire up the init/exit entry points defined above. */
module_init(ip_set_init);
1985
module_exit(ip_set_fini);