1
#ifndef _IP_SET_CHASH_H
2
#define _IP_SET_CHASH_H
6
#include "ip_set_timeout.h"
8
/* Cacheline friendly hash with resizing when linear searching becomes too
 * long. Internally jhash is used with the assumption that the size of the
 * stored data is a multiple of sizeof(u32). If storage supports timeout,
 * the timeout field must be the last one in the data structure - that field
 * is ignored when computing the hash key.
 */
15
/* Number of elements to store in an array block */
#define CHASH_DEFAULT_ARRAY_SIZE	4
/* Number of arrays: max ARRAY_SIZE * CHAIN_LIMIT "long" chains */
#define CHASH_DEFAULT_CHAIN_LIMIT	3
20
/* Book-keeping of the prefixes added to the set */
22
u8 cidr; /* the different cidr values in the set */
23
u32 nets; /* number of elements per cidr */
27
u8 htable_bits; /* size of hash table == 2^htable_bits */
28
struct slist htable[0]; /* hashtable of single linked lists */
32
struct htable *table; /* the hash table */
33
u32 maxelem; /* max elements in the hash */
34
u32 elements; /* current element (vs timeout) */
35
u32 initval; /* random jhash init value */
36
u32 timeout; /* timeout value, if enabled */
37
struct timer_list gc; /* garbage collection when timeout enabled */
38
u8 array_size; /* number of elements in an array */
39
u8 chain_limit; /* max number of arrays */
40
#ifdef IP_SET_HASH_WITH_NETMASK
41
u8 netmask; /* netmask value for subnets to store */
43
#ifdef IP_SET_HASH_WITH_NETS
44
struct chash_nets nets[0]; /* book-keeping of prefixes */
48
/* Compute htable_bits from the user input parameter hashsize */
50
htable_bits(u32 hashsize)
52
/* Assume that hashsize == 2^htable_bits */
53
u8 bits = fls(hashsize - 1);
54
if (jhash_size(bits) != hashsize)
55
/* Round up to the first 2^n value */
61
#ifdef IP_SET_HASH_WITH_NETS
63
/* Host mask width of the set's address family: 32 for IPv4, 128 for IPv6.
 * The argument is parenthesized for macro hygiene, so expressions such as
 * ternaries can safely be passed as "family". */
#define SET_HOST_MASK(family)	((family) == AF_INET ? 32 : 128)
65
/* Network cidr size book keeping when the hash stores different
68
add_cidr(struct chash *h, u8 cidr, u8 host_mask)
72
++h->nets[cidr-1].nets;
74
pr_debug("add_cidr added %u: %u", cidr, h->nets[cidr-1].nets);
76
if (h->nets[cidr-1].nets > 1)
80
for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
81
/* Add in increasing prefix order, so larger cidr first */
82
if (h->nets[i].cidr < cidr)
83
swap(h->nets[i].cidr, cidr);
86
h->nets[i].cidr = cidr;
90
del_cidr(struct chash *h, u8 cidr, u8 host_mask)
94
--h->nets[cidr-1].nets;
96
pr_debug("del_cidr deleted %u: %u", cidr, h->nets[cidr-1].nets);
98
if (h->nets[cidr-1].nets != 0)
101
/* All entries with this cidr size deleted, so cleanup h->cidr[] */
102
for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
103
if (h->nets[i].cidr == cidr)
104
h->nets[i].cidr = cidr = h->nets[i+1].cidr;
106
h->nets[i - 1].cidr = 0;
110
/* Destroy the hashtable part of the set */
112
chash_destroy(struct htable *ht)
114
struct slist *n, *tmp;
117
for (i = 0; i < jhash_size(ht->htable_bits); i++)
118
slist_for_each_safe(n, tmp, &ht->htable[i])
119
/* FIXME: use slab cache */
125
/* Calculate the actual memory size of the set data */
127
chash_memsize(const struct chash *h, size_t dsize, u8 host_mask)
131
struct htable *ht = h->table;
132
size_t memsize = sizeof(*h)
133
#ifdef IP_SET_HASH_WITH_NETS
134
+ sizeof(struct chash_nets) * host_mask
136
+ jhash_size(ht->htable_bits) * sizeof(struct slist);
138
for (i = 0; i < jhash_size(ht->htable_bits); i++)
139
slist_for_each(n, &ht->htable[i])
140
memsize += sizeof(struct slist)
141
+ h->array_size * dsize;
146
/* Flush a hash type of set: destroy all elements */
148
ip_set_hash_flush(struct ip_set *set)
150
struct chash *h = set->data;
151
struct htable *ht = h->table;
152
struct slist *n, *tmp;
155
for (i = 0; i < jhash_size(ht->htable_bits); i++) {
156
slist_for_each_safe(n, tmp, &ht->htable[i])
157
/* FIXME: slab cache */
159
ht->htable[i].next = NULL;
161
#ifdef IP_SET_HASH_WITH_NETS
162
memset(h->nets, 0, sizeof(struct chash_nets)
163
* SET_HOST_MASK(set->family));
168
/* Destroy a hash type of set */
170
ip_set_hash_destroy(struct ip_set *set)
172
struct chash *h = set->data;
174
if (with_timeout(h->timeout))
175
del_timer_sync(&h->gc);
177
chash_destroy(h->table);
183
/* Compute the hash bucket index of an element: jhash2 over the u32 words
 * of the element, masked down to the table size. Because the element size
 * used is that of struct type_pf_elem, a trailing timeout field (present
 * only in struct type_pf_telem) is excluded from the key, as the header
 * comment of this file requires. */
#define JHASH2(data, initval, htable_bits)				 \
(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
	& jhash_mask(htable_bits))
187
#endif /* _IP_SET_CHASH_H */
189
/* Two-level paste so that TYPE and PF are macro-expanded before the
 * ## operator is applied (a direct a##b##c would paste the literal
 * tokens "TYPE" and "PF"). */
#define CONCAT(a, b, c)		a##b##c
#define TOKEN(a, b, c)		CONCAT(a, b, c)
192
/* Type/family dependent function prototypes: each generic "type_pf_"
 * name below is pasted into a TYPE/PF specific identifier, so this
 * header can be included repeatedly to instantiate the hash code for
 * several set types and address families. */

#define type_pf_data_equal	TOKEN(TYPE, PF, _data_equal)
#define type_pf_data_isnull	TOKEN(TYPE, PF, _data_isnull)
#define type_pf_data_copy	TOKEN(TYPE, PF, _data_copy)
#define type_pf_data_swap	TOKEN(TYPE, PF, _data_swap)
#define type_pf_data_zero_out	TOKEN(TYPE, PF, _data_zero_out)
#define type_pf_data_netmask	TOKEN(TYPE, PF, _data_netmask)
#define type_pf_data_list	TOKEN(TYPE, PF, _data_list)
#define type_pf_data_tlist	TOKEN(TYPE, PF, _data_tlist)

#define type_pf_elem		TOKEN(TYPE, PF, _elem)
#define type_pf_telem		TOKEN(TYPE, PF, _telem)
#define type_pf_data_timeout	TOKEN(TYPE, PF, _data_timeout)
#define type_pf_data_expired	TOKEN(TYPE, PF, _data_expired)
#define type_pf_data_swap_timeout TOKEN(TYPE, PF, _data_swap_timeout)
#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)

#define type_pf_chash_readd	TOKEN(TYPE, PF, _chash_readd)
#define type_pf_chash_del_elem	TOKEN(TYPE, PF, _chash_del_elem)
#define type_pf_chash_add	TOKEN(TYPE, PF, _chash_add)
#define type_pf_chash_del	TOKEN(TYPE, PF, _chash_del)
#define type_pf_chash_test_cidrs TOKEN(TYPE, PF, _chash_test_cidrs)
#define type_pf_chash_test	TOKEN(TYPE, PF, _chash_test)

#define type_pf_chash_treadd	TOKEN(TYPE, PF, _chash_treadd)
#define type_pf_chash_del_telem	TOKEN(TYPE, PF, _chash_del_telem)
#define type_pf_chash_expire	TOKEN(TYPE, PF, _chash_expire)
#define type_pf_chash_tadd	TOKEN(TYPE, PF, _chash_tadd)
#define type_pf_chash_tdel	TOKEN(TYPE, PF, _chash_tdel)
#define type_pf_chash_ttest_cidrs TOKEN(TYPE, PF, _chash_ttest_cidrs)
#define type_pf_chash_ttest	TOKEN(TYPE, PF, _chash_ttest)

#define type_pf_resize		TOKEN(TYPE, PF, _resize)
#define type_pf_tresize		TOKEN(TYPE, PF, _tresize)
/* flush and destroy are type/family independent, see above */
#define type_pf_flush		ip_set_hash_flush
#define type_pf_destroy		ip_set_hash_destroy
#define type_pf_head		TOKEN(TYPE, PF, _head)
#define type_pf_list		TOKEN(TYPE, PF, _list)
#define type_pf_tlist		TOKEN(TYPE, PF, _tlist)
#define type_pf_same_set	TOKEN(TYPE, PF, _same_set)
#define type_pf_kadt		TOKEN(TYPE, PF, _kadt)
#define type_pf_uadt		TOKEN(TYPE, PF, _uadt)
#define type_pf_gc		TOKEN(TYPE, PF, _gc)
#define type_pf_gc_init		TOKEN(TYPE, PF, _gc_init)
#define type_pf_variant		TOKEN(TYPE, PF, _variant)
#define type_pf_tvariant	TOKEN(TYPE, PF, _tvariant)
240
/* Flavour without timeout */
242
/* Get the ith element from the array block n (no-timeout flavour:
 * elements are plain struct type_pf_elem, stored right after the
 * slist link of the block). */
#define chash_data(n, i)					   \
(struct type_pf_elem *)((char *)(n) + sizeof(struct slist)	   \
			+ (i)*sizeof(struct type_pf_elem))
247
/* Add an element to the hash table when resizing the set:
248
* we spare the maintenance of the internal counters. */
250
type_pf_chash_readd(struct chash *h, struct htable *ht,
251
const struct type_pf_elem *value,
254
struct slist *n, *prev;
255
struct type_pf_elem *data;
258
u32 hash = JHASH2(value, h->initval, ht->htable_bits);
260
slist_for_each_prev(prev, n, &ht->htable[hash]) {
261
for (i = 0; i < h->array_size; i++) {
262
data = chash_data(n, i);
263
if (type_pf_data_isnull(data)) {
270
if (j < h->chain_limit) {
271
tmp = kzalloc(h->array_size * sizeof(struct type_pf_elem)
272
+ sizeof(struct slist), gfp_flags);
275
prev->next = (struct slist *) tmp;
276
data = chash_data(tmp, 0);
278
/* Trigger rehashing */
282
type_pf_data_copy(data, value);
286
/* Delete an element from the hash table: swap it with the last
287
* element in the hash bucket and free up the array if it was
288
* completely emptied */
290
type_pf_chash_del_elem(struct chash *h, struct slist *prev,
291
struct slist *n, int i)
293
struct type_pf_elem *data = chash_data(n, i);
295
int j; /* Index in array */
297
if (n->next != NULL) {
298
for (prev = n, tmp = n->next;
300
prev = tmp, tmp = tmp->next)
301
/* Find last array */;
304
/* Already at last array */
308
/* Find last non-empty element */
309
for (; j < h->array_size - 1; j++)
310
if (type_pf_data_isnull(chash_data(tmp, j + 1)))
313
if (!(tmp == n && i == j))
314
type_pf_data_swap(data, chash_data(tmp, j));
316
#ifdef IP_SET_HASH_WITH_NETS
317
del_cidr(h, data->cidr, HOST_MASK);
323
type_pf_data_zero_out(chash_data(tmp, j));
328
/* Resize a hash: create a new hash table with doubling the hashsize
329
* and inserting the elements to it. Repeat until we succeed or
330
* fail due to memory pressures. */
332
type_pf_resize(struct ip_set *set, gfp_t gfp_flags, bool retried)
334
struct chash *h = set->data;
335
struct htable *ht, *orig = h->table;
336
u8 htable_bits = orig->htable_bits;
338
const struct type_pf_elem *data;
346
/* In case we have plenty of memory :-) */
347
return -IPSET_ERR_HASH_FULL;
348
ht = ip_set_alloc(sizeof(*ht)
349
+ jhash_size(htable_bits) * sizeof(struct slist),
353
ht->htable_bits = htable_bits;
355
read_lock_bh(&set->lock);
357
for (; i < jhash_size(orig->htable_bits); i++) {
358
slist_for_each(n, &orig->htable[i]) {
359
for (j = 0; j < h->array_size; j++) {
360
data = chash_data(n, j);
361
if (type_pf_data_isnull(data)) {
365
ret = type_pf_chash_readd(h, ht,
368
read_unlock_bh(&set->lock);
379
read_unlock_bh(&set->lock);
381
/* Give time to other users of the set */
389
/* Add an element to a hash and update the internal counters when succeeded,
390
* otherwise report the proper error code. */
392
type_pf_chash_add(struct ip_set *set, void *value,
393
gfp_t gfp_flags, u32 timeout)
395
struct chash *h = set->data;
396
const struct type_pf_elem *d = value;
397
struct slist *n, *prev;
398
struct htable *ht = h->table;
399
struct type_pf_elem *data;
404
if (h->elements >= h->maxelem)
405
return -IPSET_ERR_HASH_FULL;
407
hash = JHASH2(value, h->initval, ht->htable_bits);
408
slist_for_each_prev(prev, n, &ht->htable[hash]) {
409
for (i = 0; i < h->array_size; i++) {
410
data = chash_data(n, i);
411
if (type_pf_data_isnull(data)) {
415
if (type_pf_data_equal(data, d))
416
return -IPSET_ERR_EXIST;
420
if (j < h->chain_limit) {
421
tmp = kzalloc(h->array_size * sizeof(struct type_pf_elem)
422
+ sizeof(struct slist), gfp_flags);
425
prev->next = (struct slist *) tmp;
426
data = chash_data(tmp, 0);
432
type_pf_data_copy(data, d);
433
#ifdef IP_SET_HASH_WITH_NETS
434
add_cidr(h, d->cidr, HOST_MASK);
440
/* Delete an element from the hash */
442
type_pf_chash_del(struct ip_set *set, void *value,
443
gfp_t gfp_flags, u32 timeout)
445
struct chash *h = set->data;
446
const struct type_pf_elem *d = value;
447
struct htable *ht = h->table;
448
struct slist *n, *prev;
450
struct type_pf_elem *data;
451
u32 hash = JHASH2(value, h->initval, ht->htable_bits);
453
slist_for_each_prev(prev, n, &ht->htable[hash])
454
for (i = 0; i < h->array_size; i++) {
455
data = chash_data(n, i);
456
if (type_pf_data_isnull(data))
457
return -IPSET_ERR_EXIST;
458
if (type_pf_data_equal(data, d)) {
459
type_pf_chash_del_elem(h, prev, n, i);
464
return -IPSET_ERR_EXIST;
467
#ifdef IP_SET_HASH_WITH_NETS
469
/* Special test function which takes into account the different network
470
* sizes added to the set */
472
type_pf_chash_test_cidrs(struct ip_set *set,
473
struct type_pf_elem *d,
474
gfp_t gfp_flags, u32 timeout)
476
struct chash *h = set->data;
477
struct htable *ht = h->table;
479
const struct type_pf_elem *data;
482
u8 host_mask = SET_HOST_MASK(set->family);
485
pr_debug("test by nets");
486
for (; j < host_mask && h->nets[j].cidr; j++) {
487
type_pf_data_netmask(d, h->nets[j].cidr);
488
hash = JHASH2(d, h->initval, ht->htable_bits);
489
slist_for_each(n, &ht->htable[hash])
490
for (i = 0; i < h->array_size; i++) {
491
data = chash_data(n, i);
492
if (type_pf_data_isnull(data)) {
496
if (type_pf_data_equal(data, d))
504
/* Test whether the element is added to the set */
506
type_pf_chash_test(struct ip_set *set, void *value,
507
gfp_t gfp_flags, u32 timeout)
509
struct chash *h = set->data;
510
struct htable *ht = h->table;
511
struct type_pf_elem *d = value;
513
const struct type_pf_elem *data;
517
#ifdef IP_SET_HASH_WITH_NETS
518
/* If we test an IP address and not a network address,
519
* try all possible network sizes */
520
if (d->cidr == SET_HOST_MASK(set->family))
521
return type_pf_chash_test_cidrs(set, d, gfp_flags, timeout);
524
hash = JHASH2(d, h->initval, ht->htable_bits);
525
slist_for_each(n, &ht->htable[hash])
526
for (i = 0; i < h->array_size; i++) {
527
data = chash_data(n, i);
528
if (type_pf_data_isnull(data))
530
if (type_pf_data_equal(data, d))
536
/* Reply a HEADER request: fill out the header part of the set */
538
type_pf_head(struct ip_set *set, struct sk_buff *skb)
540
const struct chash *h = set->data;
541
struct nlattr *nested;
544
read_lock_bh(&set->lock);
545
memsize = chash_memsize(h, with_timeout(h->timeout)
546
? sizeof(struct type_pf_telem)
547
: sizeof(struct type_pf_elem),
548
set->family == AF_INET ? 32 : 128);
549
read_unlock_bh(&set->lock);
551
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
553
goto nla_put_failure;
554
NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
555
htonl(jhash_size(h->table->htable_bits)));
556
NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
557
#ifdef IP_SET_HASH_WITH_NETMASK
558
if (h->netmask != HOST_MASK)
559
NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
561
NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
562
htonl(atomic_read(&set->ref) - 1));
563
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
564
if (with_timeout(h->timeout))
565
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
566
ipset_nest_end(skb, nested);
573
/* Reply a LIST/SAVE request: dump the elements of the specified set */
575
type_pf_list(struct ip_set *set,
576
struct sk_buff *skb, struct netlink_callback *cb)
578
const struct chash *h = set->data;
579
const struct htable *ht = h->table;
580
struct nlattr *atd, *nested;
582
const struct type_pf_elem *data;
583
u32 first = cb->args[2];
584
/* We assume that one hash bucket fills into one page */
588
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
591
pr_debug("list hash set %s", set->name);
592
for (; cb->args[2] < jhash_size(ht->htable_bits); cb->args[2]++) {
593
incomplete = skb_tail_pointer(skb);
594
slist_for_each(n, &ht->htable[cb->args[2]]) {
595
for (i = 0; i < h->array_size; i++) {
596
data = chash_data(n, i);
597
if (type_pf_data_isnull(data))
599
pr_debug("list hash %lu slist %p i %u",
601
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
603
if (cb->args[2] == first) {
604
nla_nest_cancel(skb, atd);
607
goto nla_put_failure;
609
if (type_pf_data_list(skb, data))
610
goto nla_put_failure;
611
ipset_nest_end(skb, nested);
615
ipset_nest_end(skb, atd);
616
/* Set listing finished */
622
nlmsg_trim(skb, incomplete);
623
ipset_nest_end(skb, atd);
624
if (unlikely(first == cb->args[2])) {
625
pr_warn("Can't list set %s: one bucket does not fit into "
626
"a message. Please report it!\n", set->name);
633
type_pf_kadt(struct ip_set *set, const struct sk_buff * skb,
634
enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
636
type_pf_uadt(struct ip_set *set, struct nlattr *head, int len,
637
enum ipset_adt adt, u32 *lineno, u32 flags);
639
static const struct ip_set_type_variant type_pf_variant = {
640
.kadt = type_pf_kadt,
641
.uadt = type_pf_uadt,
643
[IPSET_ADD] = type_pf_chash_add,
644
[IPSET_DEL] = type_pf_chash_del,
645
[IPSET_TEST] = type_pf_chash_test,
647
.destroy = type_pf_destroy,
648
.flush = type_pf_flush,
649
.head = type_pf_head,
650
.list = type_pf_list,
651
.resize = type_pf_resize,
652
.same_set = type_pf_same_set,
655
/* Flavour with timeout support */
657
/* Get the ith element from the array block n (timeout flavour: elements
 * are struct type_pf_telem sized, but returned as the base element type
 * so the common data accessors work on them). */
#define chash_tdata(n, i)					   \
(struct type_pf_elem *)((char *)(n) + sizeof(struct slist)	   \
			+ (i)*sizeof(struct type_pf_telem))
662
type_pf_data_timeout(const struct type_pf_elem *data)
664
const struct type_pf_telem *tdata =
665
(const struct type_pf_telem *) data;
667
return tdata->timeout;
671
type_pf_data_expired(const struct type_pf_elem *data)
673
const struct type_pf_telem *tdata =
674
(const struct type_pf_telem *) data;
676
return ip_set_timeout_expired(tdata->timeout);
680
type_pf_data_swap_timeout(struct type_pf_elem *src,
681
struct type_pf_elem *dst)
683
struct type_pf_telem *x = (struct type_pf_telem *) src;
684
struct type_pf_telem *y = (struct type_pf_telem *) dst;
686
swap(x->timeout, y->timeout);
690
type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
692
struct type_pf_telem *tdata = (struct type_pf_telem *) data;
694
tdata->timeout = ip_set_timeout_set(timeout);
698
type_pf_chash_treadd(struct chash *h, struct htable *ht,
699
const struct type_pf_elem *value,
700
gfp_t gfp_flags, u32 timeout)
702
struct slist *n, *prev;
703
struct type_pf_elem *data;
706
u32 hash = JHASH2(value, h->initval, ht->htable_bits);
708
slist_for_each_prev(prev, n, &ht->htable[hash]) {
709
for (i = 0; i < h->array_size; i++) {
710
data = chash_tdata(n, i);
711
if (type_pf_data_isnull(data)) {
718
if (j < h->chain_limit) {
719
tmp = kzalloc(h->array_size * sizeof(struct type_pf_telem)
720
+ sizeof(struct slist), gfp_flags);
723
prev->next = (struct slist *) tmp;
724
data = chash_tdata(tmp, 0);
726
/* Trigger rehashing */
730
type_pf_data_copy(data, value);
731
type_pf_data_timeout_set(data, timeout);
736
type_pf_chash_del_telem(struct chash *h, struct slist *prev,
737
struct slist *n, int i)
739
struct type_pf_elem *d, *data = chash_tdata(n, i);
741
int j; /* Index in array */
743
pr_debug("del %u", i);
744
if (n->next != NULL) {
745
for (prev = n, tmp = n->next;
747
prev = tmp, tmp = tmp->next)
748
/* Find last array */;
751
/* Already at last array */
755
/* Find last non-empty element */
756
for (; j < h->array_size - 1; j++)
757
if (type_pf_data_isnull(chash_tdata(tmp, j + 1)))
760
d = chash_tdata(tmp, j);
761
if (!(tmp == n && i == j)) {
762
type_pf_data_swap(data, d);
763
type_pf_data_swap_timeout(data, d);
765
#ifdef IP_SET_HASH_WITH_NETS
766
del_cidr(h, data->cidr, HOST_MASK);
772
type_pf_data_zero_out(d);
777
/* Delete expired elements from the hashtable */
779
type_pf_chash_expire(struct chash *h)
781
struct htable *ht = h->table;
782
struct slist *n, *prev;
783
struct type_pf_elem *data;
787
for (i = 0; i < jhash_size(ht->htable_bits); i++)
788
slist_for_each_prev(prev, n, &ht->htable[i])
789
for (j = 0; j < h->array_size; j++) {
790
data = chash_tdata(n, j);
791
if (type_pf_data_isnull(data))
793
if (type_pf_data_expired(data)) {
794
pr_debug("expire %u/%u", i, j);
795
type_pf_chash_del_telem(h, prev, n, j);
801
type_pf_tresize(struct ip_set *set, gfp_t gfp_flags, bool retried)
803
struct chash *h = set->data;
804
struct htable *ht, *orig = h->table;
805
u8 htable_bits = orig->htable_bits;
807
const struct type_pf_elem *data;
811
/* Try to cleanup once */
814
write_lock_bh(&set->lock);
815
type_pf_chash_expire(set->data);
816
write_unlock_bh(&set->lock);
825
/* In case we have plenty of memory :-) */
826
return -IPSET_ERR_HASH_FULL;
827
ht = ip_set_alloc(sizeof(*ht)
828
+ jhash_size(htable_bits) * sizeof(struct slist),
832
ht->htable_bits = htable_bits;
834
read_lock_bh(&set->lock);
836
for (; i < jhash_size(orig->htable_bits); i++) {
837
slist_for_each(n, &orig->htable[i]) {
838
for (j = 0; j < h->array_size; j++) {
839
data = chash_tdata(n, j);
840
if (type_pf_data_isnull(data)) {
844
ret = type_pf_chash_treadd(h, ht,
846
type_pf_data_timeout(data));
848
read_unlock_bh(&set->lock);
859
read_unlock_bh(&set->lock);
861
/* Give time to other users of the set */
870
type_pf_chash_tadd(struct ip_set *set, void *value,
871
gfp_t gfp_flags, u32 timeout)
873
struct chash *h = set->data;
874
const struct type_pf_elem *d = value;
875
struct slist *n, *prev;
876
struct htable *ht = h->table;
877
struct type_pf_elem *data;
882
if (h->elements >= h->maxelem)
883
/* FIXME: when set is full, we slow down here */
884
type_pf_chash_expire(h);
885
if (h->elements >= h->maxelem)
886
return -IPSET_ERR_HASH_FULL;
888
hash = JHASH2(d, h->initval, ht->htable_bits);
889
slist_for_each_prev(prev, n, &ht->htable[hash]) {
890
for (i = 0; i < h->array_size; i++) {
891
data = chash_tdata(n, i);
892
if (type_pf_data_isnull(data)
893
|| type_pf_data_expired(data)) {
897
if (type_pf_data_equal(data, d))
898
return -IPSET_ERR_EXIST;
902
if (j < h->chain_limit) {
903
tmp = kzalloc(h->array_size * sizeof(struct type_pf_telem)
904
+ sizeof(struct slist), gfp_flags);
907
prev->next = (struct slist *) tmp;
908
data = chash_tdata(tmp, 0);
914
if (type_pf_data_isnull(data))
916
#ifdef IP_SET_HASH_WITH_NETS
918
del_cidr(h, data->cidr, HOST_MASK);
920
add_cidr(h, d->cidr, HOST_MASK);
922
type_pf_data_copy(data, d);
923
type_pf_data_timeout_set(data, timeout);
928
type_pf_chash_tdel(struct ip_set *set, void *value,
929
gfp_t gfp_flags, u32 timeout)
931
struct chash *h = set->data;
932
struct htable *ht = h->table;
933
const struct type_pf_elem *d = value;
934
struct slist *n, *prev;
936
struct type_pf_elem *data;
937
u32 hash = JHASH2(value, h->initval, ht->htable_bits);
939
slist_for_each_prev(prev, n, &ht->htable[hash])
940
for (i = 0; i < h->array_size; i++) {
941
data = chash_tdata(n, i);
942
if (type_pf_data_isnull(data))
943
return -IPSET_ERR_EXIST;
944
if (type_pf_data_equal(data, d)) {
945
if (type_pf_data_expired(data))
946
ret = -IPSET_ERR_EXIST;
947
type_pf_chash_del_telem(h, prev, n, i);
952
return -IPSET_ERR_EXIST;
955
#ifdef IP_SET_HASH_WITH_NETS
957
type_pf_chash_ttest_cidrs(struct ip_set *set,
958
struct type_pf_elem *d,
959
gfp_t gfp_flags, u32 timeout)
961
struct chash *h = set->data;
962
struct htable *ht = h->table;
963
struct type_pf_elem *data;
967
u8 host_mask = SET_HOST_MASK(set->family);
970
for (; j < host_mask && h->nets[j].cidr; j++) {
971
type_pf_data_netmask(d, h->nets[j].cidr);
972
hash = JHASH2(d, h->initval, ht->htable_bits);
973
slist_for_each(n, &ht->htable[hash])
974
for (i = 0; i < h->array_size; i++) {
975
data = chash_tdata(n, i);
976
if (type_pf_data_isnull(data)) {
980
if (type_pf_data_equal(data, d))
981
return !type_pf_data_expired(data);
989
type_pf_chash_ttest(struct ip_set *set, void *value,
990
gfp_t gfp_flags, u32 timeout)
992
struct chash *h = set->data;
993
struct htable *ht = h->table;
994
struct type_pf_elem *data, *d = value;
999
#ifdef IP_SET_HASH_WITH_NETS
1000
if (d->cidr == SET_HOST_MASK(set->family))
1001
return type_pf_chash_ttest_cidrs(set, d, gfp_flags,
1004
hash = JHASH2(d, h->initval, ht->htable_bits);
1005
slist_for_each(n, &ht->htable[hash])
1006
for (i = 0; i < h->array_size; i++) {
1007
data = chash_tdata(n, i);
1008
if (type_pf_data_isnull(data))
1010
if (type_pf_data_equal(data, d))
1011
return !type_pf_data_expired(data);
1017
type_pf_tlist(struct ip_set *set,
1018
struct sk_buff *skb, struct netlink_callback *cb)
1020
const struct chash *h = set->data;
1021
const struct htable *ht = h->table;
1022
struct nlattr *atd, *nested;
1024
const struct type_pf_elem *data;
1025
u32 first = cb->args[2];
1026
/* We assume that one hash bucket fills into one page */
1030
atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
1033
for (; cb->args[2] < jhash_size(ht->htable_bits); cb->args[2]++) {
1034
incomplete = skb_tail_pointer(skb);
1035
slist_for_each(n, &ht->htable[cb->args[2]]) {
1036
for (i = 0; i < h->array_size; i++) {
1037
data = chash_tdata(n, i);
1038
pr_debug("list %p %u", n, i);
1039
if (type_pf_data_isnull(data))
1041
if (type_pf_data_expired(data))
1043
pr_debug("do list %p %u", n, i);
1044
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
1046
if (cb->args[2] == first) {
1047
nla_nest_cancel(skb, atd);
1050
goto nla_put_failure;
1052
if (type_pf_data_tlist(skb, data))
1053
goto nla_put_failure;
1054
ipset_nest_end(skb, nested);
1058
ipset_nest_end(skb, atd);
1059
/* Set listing finished */
1065
nlmsg_trim(skb, incomplete);
1066
ipset_nest_end(skb, atd);
1067
if (unlikely(first == cb->args[2])) {
1068
pr_warn("Can't list set %s: one bucket does not fit into "
1069
"a message. Please report it!\n", set->name);
1075
static const struct ip_set_type_variant type_pf_tvariant = {
1076
.kadt = type_pf_kadt,
1077
.uadt = type_pf_uadt,
1079
[IPSET_ADD] = type_pf_chash_tadd,
1080
[IPSET_DEL] = type_pf_chash_tdel,
1081
[IPSET_TEST] = type_pf_chash_ttest,
1083
.destroy = type_pf_destroy,
1084
.flush = type_pf_flush,
1085
.head = type_pf_head,
1086
.list = type_pf_tlist,
1087
.resize = type_pf_tresize,
1088
.same_set = type_pf_same_set,
1092
type_pf_gc(unsigned long ul_set)
1094
struct ip_set *set = (struct ip_set *) ul_set;
1095
struct chash *h = set->data;
1098
write_lock_bh(&set->lock);
1099
type_pf_chash_expire(h);
1100
write_unlock_bh(&set->lock);
1102
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
1107
type_pf_gc_init(struct ip_set *set)
1109
struct chash *h = set->data;
1112
h->gc.data = (unsigned long) set;
1113
h->gc.function = type_pf_gc;
1114
h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
1116
pr_debug("gc initialized, run in every %u",
1117
IPSET_GC_PERIOD(h->timeout));
1120
/* Undefine every type/family dependent name so this header can be
 * re-included with a different TYPE/PF combination.
 * NOTE(review): #undef type_pf_elem, _head, _list, _kadt, _uadt and _gc
 * were lost in extraction and are restored by symmetry with the #define
 * list above — verify against the upstream file. */
#undef type_pf_data_equal
#undef type_pf_data_isnull
#undef type_pf_data_copy
#undef type_pf_data_swap
#undef type_pf_data_zero_out
#undef type_pf_data_list
#undef type_pf_data_tlist

#undef type_pf_elem
#undef type_pf_telem
#undef type_pf_data_timeout
#undef type_pf_data_expired
#undef type_pf_data_swap_timeout
#undef type_pf_data_netmask
#undef type_pf_data_timeout_set

#undef type_pf_chash_readd
#undef type_pf_chash_del_elem
#undef type_pf_chash_add
#undef type_pf_chash_del
#undef type_pf_chash_test_cidrs
#undef type_pf_chash_test

#undef type_pf_chash_treadd
#undef type_pf_chash_del_telem
#undef type_pf_chash_expire
#undef type_pf_chash_tadd
#undef type_pf_chash_tdel
#undef type_pf_chash_ttest_cidrs
#undef type_pf_chash_ttest

#undef type_pf_resize
#undef type_pf_tresize
#undef type_pf_flush
#undef type_pf_destroy
#undef type_pf_head
#undef type_pf_list
#undef type_pf_tlist
#undef type_pf_same_set
#undef type_pf_kadt
#undef type_pf_uadt
#undef type_pf_gc
#undef type_pf_gc_init
#undef type_pf_variant
#undef type_pf_tvariant