/* SCTP kernel reference Implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2002 International Business Machines, Corp.
 * Copyright (c) 2001 Intel Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * $Header: /cvsroot/lksctp/lksctp/test/test_frame.h,v 1.10 2002/07/23 15:58:48 jgrimm Exp $
 *
 * This header holds things moved out of the kernel header files
 * but needed for the testframe.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <sctp-developers-list@cig.mot.com>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#ifndef __test_frame_h__
55
/* The matching `#define __test_frame_h__` for the include guard above was
 * lost in this listing; restore it so repeated inclusion stays harmless.
 */
#define __test_frame_h__

/* User-space stand-ins for kernel helpers.  The test frame is single
 * threaded and runs in user space, so user copies always "succeed" and
 * there are never pending signals.  get_user()/put_user() arguments are
 * parenthesized so expressions like `get_user(*p + 1, q)` expand with the
 * intended precedence at every use site.
 */
#undef __set_current_state
#define get_user(x, ptr) ({ (x) = *(ptr); 0; })
#define put_user(x, ptr) ({ *(ptr) = (x); 0; })
#define access_ok(x, y, z) ({1==1;})
#define __set_current_state(x)
#define signal_pending(x) 0
67
/* Test-frame stub: the harness is single threaded, so there is nothing to spin on. */
static inline void sctp_spin_lock(spinlock_t *lock) { (void) lock; }
68
/* Test-frame stub: releasing a lock that was never really taken is a no-op. */
static inline void sctp_spin_unlock(spinlock_t *lock) { (void) lock; }
69
/* Test-frame stub: no writers to exclude in the single-threaded harness. */
static inline void sctp_write_lock(rwlock_t *lock) { (void) lock; }
70
/* Test-frame stub: paired with sctp_write_lock(); intentionally does nothing. */
static inline void sctp_write_unlock(rwlock_t *lock) { (void) lock; }
71
/* Test-frame stub: readers never contend in the single-threaded harness. */
static inline void sctp_read_lock(rwlock_t *lock) { (void) lock; }
72
/* Test-frame stub: paired with sctp_read_lock(); intentionally does nothing. */
static inline void sctp_read_unlock(rwlock_t *lock) { (void) lock; }
73
/* Test-frame stub: there are no bottom halves to disable in user space. */
static inline void sctp_local_bh_disable(void) { }
74
/* Test-frame stub: paired with sctp_local_bh_disable(); intentionally empty. */
static inline void sctp_local_bh_enable(void) { }
76
/* Test-frame stub: IRQ-safe locking is a no-op in the single-threaded
 * harness.
 * NOTE(review): the storage-class/type specifiers were lost in this
 * listing (an implicit return type is invalid since C99); restored as
 * `static inline void` to match the sibling lock stubs above.
 */
static inline void sctp_spin_lock_irqsave(spinlock_t *lock, unsigned long flags) { return; }
78
/* Test-frame stub: restoring IRQ state is a no-op in the single-threaded
 * harness.
 * NOTE(review): the storage-class/type specifiers were lost in this
 * listing (an implicit return type is invalid since C99); restored as
 * `static inline void` to match the sibling lock stubs above.
 */
static inline void sctp_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { return; }
80
/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 *
 * Moving here until we use this again.  Right now the locking granularity
 * is at the socket level.  Some day we may want to do association-level
 * locking as well.  (The tail of this comment was lost in this listing.)
 */
94
extern int ft_sctp_lock_bug;
96
/* Control level of debugging for the testframe.
 * 0 - Set the ft_frame_sctp_lock_bug only.
 * 1 - (description lost in this listing)
 * 2 - Check a little more stringently and halt.
 */
101
extern int ft_sctp_lock_assert;
103
/* Initialize the sctp_lock. */
104
static inline void sctp_lock_init(sctp_lock_t *lock)
106
spin_lock_init(&lock->slock);
110
} /* sctp_lock_init() */
112
/* Acquire the user-context side of the test-frame socket lock.
 * NOTE(review): this listing is truncated.  The bare numeric lines below
 * are original line numbers, and several statements (closing braces, the
 * ft_sctp_lock_bug / assert error paths, the lock->users update) are
 * missing between the visible lines -- do not treat this body as
 * complete or compilable.
 */
113
static inline void sctp_lock_acquire(sctp_lock_t *lock)
115
/* Skip the stringent pre-checks when ft_sctp_lock_assert == 1. */
116
if (ft_sctp_lock_assert != 1) {
117
if (spin_is_locked(&lock->slock)) {
121
spin_lock(&lock->slock);
122
if (lock->users != 0) {
123
/* The test frame is single threaded, so until we figure out
124
 * a good way to emulate waitqueues, a second user here is an error.
 * (Remainder of this comment and the error path were lost in the
 * listing.)
 */
130
spin_unlock(&lock->slock);
134
spin_unlock(&lock->slock);
136
if (ft_sctp_lock_assert) {
142
} /* sctp_lock_acquire() */
144
/* Release the user-context side of the test-frame socket lock.
 * NOTE(review): this listing is truncated.  The bare numeric lines below
 * are original line numbers; the error path taken when lock->users != 1
 * (the harness is single threaded, so exactly one user is expected) and
 * several closing braces are missing between the visible lines.
 */
145
static inline void sctp_lock_release(sctp_lock_t *lock) {
146
spin_lock(&lock->slock);
147
if (lock->users != 1) {
148
/* The test frame is single threaded, so any other user count is a bug. */
153
spin_unlock(&lock->slock);
157
spin_unlock(&lock->slock);
158
if (ft_sctp_lock_assert) {
164
} /* sctp_lock_release() */
166
/* Check whether the bh really owns the lock (or else there is
167
* a task in the lock too. The spinlock should be aquired before
170
static inline int sctp_lock_bh_locked(sctp_lock_t *lock)
172
if (!spin_is_locked(&lock->slock)) {
173
printk("The lock must be acquired first.\n");
176
return( lock->users ? 0 : 1);
179
if (ft_sctp_lock_assert) {
182
ft_sctp_lock_bug = 1;
185
} /* sctp_lock_bh_locked() */
187
/* Acquire the test-frame socket lock, BH (bottom-half) version.
 * NOTE(review): this listing is truncated.  The bare numeric lines below
 * are original line numbers; several statements (closing braces and the
 * error-handling path around ft_sctp_lock_bug) are missing between the
 * visible lines.
 */
188
static inline void sctp_lock_bh_acquire(sctp_lock_t *lock)
190
/* If it is already locked, something is wrong, as the testframe
 * is single threaded.
 */
193
if (ft_sctp_lock_assert != 1) {
194
if (spin_is_locked(&lock->slock)) {
198
spin_lock(&lock->slock);
202
if (ft_sctp_lock_assert) {
205
ft_sctp_lock_bug = 1;
209
} /* sctp_lock_bh_acquire() */
211
/* Release the test-frame socket lock, BH (bottom-half) version.
 * NOTE(review): this listing is truncated.  The bare numeric lines below
 * are original line numbers; several statements (closing braces and part
 * of the mismatch error path) are missing between the visible lines.
 */
212
static inline void sctp_lock_bh_release(sctp_lock_t *lock) {
214
/* If the lock is not held, we have a lock/unlock mismatch. */
217
if (!spin_is_locked(&lock->slock)) {
221
spin_unlock(&lock->slock);
225
if (ft_sctp_lock_assert) {
228
ft_sctp_lock_bug = 1;
231
} /* sctp_lock_bh_release() */
234
/* Determine if this is a valid kernel address.
237
sctp_is_valid_kaddr(unsigned long addr)
239
return(addr && (addr < PAGE_OFFSET));
241
} /* sctp_is_valid_kaddr() */
244
/* Socket-level locking is unnecessary in the single-threaded test frame;
 * compile the kernel's sock-lock primitives away to no-ops.  The bare
 * numeric lines below are original line numbers from this listing.
 */
#define sctp_lock_sock(sk) do {} while(0)
245
#define sctp_release_sock(sk) do {} while(0)
246
#define sctp_bh_lock_sock(sk) do {} while(0)
247
#define sctp_bh_unlock_sock(sk) do {} while(0)
248
#define __sctp_sock_busy(sk) 0
249
#define SCTP_SOCK_SLEEP_PRE(sk)
250
#define SCTP_SOCK_SLEEP_POST(sk)
254
/* Association-level locking stubs, disabled until the locking design is
 * settled.  The bare numeric lines below are original line numbers.
 * NOTE(review): the matching #endif (around orig. line 261) was lost in
 * this listing; the pristine header closes this #if 0 block.
 */
#if 0 /* FIXME Discourage use until locking gets decided. */
255
#define sctp_lock_asoc(__asoc) do {} while(0)
256
#define sctp_release_asoc(__asoc) do {} while(0)
259
#define sctp_bh_lock_asoc(__asoc) do {} while(0)
260
#define sctp_bh_unlock_asoc(__asoc) do {} while(0)
266
/* Prototypes for the SCTP socket-layer entry points exercised by the test
 * frame (implemented elsewhere in the SCTP stack).  The bare numeric
 * lines below are original line numbers from this listing.
 * NOTE(review): the continuation line of the sctp_recvmsg() prototype
 * (orig. line 276) was lost, leaving that declaration unterminated here.
 */
void sctp_exit(void);
271
int sctp_bind(struct sock *, struct sockaddr *, int);
272
int sctp_bindx(struct sock *, struct sockaddr_storage *, int, int);
273
int sctp_connect(struct sock *, struct sockaddr *, int);
274
void sctp_close(struct sock *, long);
275
int sctp_recvmsg(struct kiocb *, struct sock *, struct msghdr *, size_t, int,
277
int sctp_sendmsg(struct kiocb *, struct sock *, struct msghdr *, size_t);
278
int sctp_setsockopt(struct sock *, int, int, char *, int);
279
int sctp_getsockopt(struct sock *, int, int, char *, int *);
280
int sctp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
281
int sctp_disconnect(struct sock *sk, int flags);
282
struct sock *sctp_accept(struct sock *sk, int flags, int *err);
283
int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg);
284
int sctp_init_sock(struct sock *sk);
285
int sctp_destroy_sock(struct sock *sk);
286
void sctp_shutdown(struct sock *sk, int how);
287
int sctp_seqpacket_listen(struct sock *sk, int backlog);
288
int sctp_stream_listen(struct sock *sk, int backlog);
289
int sctp_do_peeloff(struct sctp_association *, struct socket **);
291
/* The test frame does not collect SNMP/MIB statistics; undefine the
 * kernel's counter macros and redefine them as no-ops.  The bare numeric
 * lines below are original line numbers from this listing.
 */
#undef IP_INC_STATS_BH
292
#define IP_INC_STATS_BH(x)
294
#undef NET_INC_STATS_BH
295
#define NET_INC_STATS_BH(x)
297
#undef ICMP_INC_STATS_BH
298
#define ICMP_INC_STATS_BH(x)
300
#undef SCTP_INC_STATS
301
#define SCTP_INC_STATS(x)
303
#undef SCTP_INC_STATS_BH
304
#define SCTP_INC_STATS_BH(x)
306
#undef SCTP_INC_STATS_USER
307
#define SCTP_INC_STATS_USER(x)
309
#undef SCTP_DEC_STATS
310
#define SCTP_DEC_STATS(x)
312
DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
313
#endif /* __test_frame_h__ */