extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
/*
 * Per-CPU spinlock associated with per-cpu table entries, and
 * with a counter for the "reading" side that allows a recursive
 * reader to avoid taking the lock and deadlocking.
 *
 * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
 * It needs to ensure that the rules are not being changed while the packet
 * is being processed. In some cases, the read lock will be acquired
 * twice on the same CPU; this is okay because of the count.
 *
 * "writing" is used when reading counters.
 * During replace any readers that are using the old tables have to complete
 * before freeing the old table. This is handled by the write locking
 * necessary for reading the counters.
 */
struct xt_info_lock {
	seqlock_t lock;
	unsigned char readers;
};
DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
/*
 * Note: we need to ensure that preemption is disabled before acquiring
 * the per-cpu-variable, so we do it as a two step process rather than
 * using "spin_lock_bh()".
 *
 * We _also_ need to disable bottom half processing before updating our
 * nesting count, to make sure that the only kind of re-entrancy is this
 * code being called by itself: since the count+lock is not an atomic
 * operation, we can allow no races.
 *
 * _Only_ that special combination of being per-cpu and never getting
 * re-entered asynchronously means that the count is safe.
 */
static inline void xt_info_rdlock_bh(void)
{
	struct xt_info_lock *lock;

	local_bh_disable();
	lock = &__get_cpu_var(xt_info_locks);
	if (likely(!lock->readers++))
		write_seqlock(&lock->lock);
}

static inline void xt_info_rdunlock_bh(void)
{
	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

	if (likely(!--lock->readers))
		write_sequnlock(&lock->lock);
	local_bh_enable();
}
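/*
 * Editor's sketch (not part of the header): a minimal picture of how the
 * per-cpu recursive read lock above is meant to bracket per-cpu rule
 * traversal in the ip/arp/ip6 tables packet path. example_do_table() and
 * example_traverse_rules() are hypothetical stand-ins, not kernel symbols.
 */
static unsigned int example_traverse_rules(void *table_private);	/* hypothetical */

static unsigned int example_do_table(void *table_private)
{
	unsigned int verdict;

	xt_info_rdlock_bh();	/* disables BH, then takes this CPU's lock once */
	verdict = example_traverse_rules(table_private);
	xt_info_rdunlock_bh();	/* drops the lock, re-enables BH */
	return verdict;
}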
/*
 * The "writer" side needs to get exclusive access to the lock,
 * regardless of readers. This must be called with bottom half
 * processing (and thus also preemption) disabled.
 */
static inline void xt_info_wrlock(unsigned int cpu)
{
	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
}

static inline void xt_info_wrunlock(unsigned int cpu)
{
	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
}
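/*
 * Editor's sketch (not part of the header): the "writer" side as used when
 * counters are read. Taking xt_info_wrlock() on each CPU in turn excludes
 * that CPU's packet-processing readers while its counters are sampled, and
 * lets readers of a replaced table drain before it is freed.
 * example_sum_cpu_counters() is a hypothetical stand-in.
 */
static void example_sum_cpu_counters(void *table_private, unsigned int cpu);	/* hypothetical */

static void example_get_counters(void *table_private)
{
	unsigned int cpu;

	local_bh_disable();		/* wrlock must be called with BH disabled */
	for_each_possible_cpu(cpu) {
		xt_info_wrlock(cpu);
		example_sum_cpu_counters(table_private, cpu);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}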
/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention :
 * Low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);
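/*
 * Editor's sketch (not part of the header): per the comment above, a
 * counter reader can use the plain seqcount primitives against this
 * per-cpu seqcount. example_cpu_counter and example_read_counter() are
 * hypothetical; the real code reads the per-cpu table counters instead.
 */
static DEFINE_PER_CPU(u64, example_cpu_counter);	/* hypothetical per-cpu datum */

static u64 example_read_counter(unsigned int cpu)
{
	seqcount_t *s = &per_cpu(xt_recseq, cpu);
	unsigned int start;
	u64 val;

	do {
		start = read_seqcount_begin(s);
		val = per_cpu(example_cpu_counter, cpu);	/* retried if a writer was active */
	} while (read_seqcount_retry(s, start));
	return val;
}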
/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing : all readers must wait the end
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
 * Returns :
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1
	 * We dont check addend value to avoid a test and conditional jump,
	 * since addend is most likely 1
	 */
	__this_cpu_add(xt_recseq.sequence, addend);

	return addend;
}
/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing : all readers can proceed
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use irqsafe_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}
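/*
 * Editor's sketch (not part of the header): how the packet path is expected
 * to pair the two helpers above. The addend returned by
 * xt_write_recseq_begin() (1 normally, 0 when this CPU re-entered the code)
 * is handed back to xt_write_recseq_end() so a recursive invocation leaves
 * the sequence untouched. example_run_rules() is a hypothetical stand-in.
 */
static unsigned int example_run_rules(void *table_private);	/* hypothetical */

static unsigned int example_recseq_do_table(void *table_private)
{
	unsigned int addend, verdict;

	local_bh_disable();		/* satisfies requirements 1) and 2) above */
	addend = xt_write_recseq_begin();
	verdict = example_run_rules(table_private);
	xt_write_recseq_end(addend);
	local_bh_enable();
	return verdict;
}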