1
diff -c -r1.1.1.1 pthread.h
2
*** pthread.h 1996/03/13 04:30:57 1.1.1.1
3
--- pthread.h 1996/10/02 17:52:47
9
* -Started coding this file.
11
+ * 93/9/28 streepy - Added support for pthread cancel
19
/* More includes, that need size_t */
20
#include <pthread/pthread_attr.h>
22
+ /* Constants for use with pthread_setcancelstate and pthread_setcanceltype */
23
+ #define PTHREAD_CANCEL_DISABLE 0
24
+ #define PTHREAD_CANCEL_ENABLE 1
25
+ #define PTHREAD_CANCEL_DEFERRED 0
26
+ #define PTHREAD_CANCEL_ASYNCHRONOUS 1
28
+ #define PTHREAD_CANCELLED (void *)1 /* Exit status of a cancelled thread */
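For context, a minimal usage sketch of the new constants (not part of the diff itself); it relies only on the pthread_setcancelstate/pthread_setcanceltype/pthread_testcancel prototypes added further down in this header:

#include <pthread.h>

void sketch_critical_region(void)
{
    int oldstate, ignored;

    /* Hold off cancellation around a region that must not be interrupted. */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    /* ... non-interruptible work ... */
    pthread_setcancelstate(oldstate, &ignored);

    /* With PTHREAD_CANCEL_DEFERRED, a pending cancel is only acted on at
     * cancellation points; pthread_testcancel() introduces one explicitly. */
    pthread_testcancel();
}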
33
#include <signal.h> /* for sigset_t */
39
- #define PF_WAIT_EVENT 0x01
40
- #define PF_DONE_EVENT 0x02
42
/* Put PANIC inside an expression that evaluates to non-void type, to
43
make it easier to combine it in expressions. */
44
! #define DO_PANIC() (PANIC (), 0)
45
! #define PANICIF(x) ((x) ? DO_PANIC () : 0)
47
! #define SET_PF_DONE_EVENT(x) \
48
! ( !(x->flags & PF_DONE_EVENT) \
49
! ? ( (x->flags & PF_WAIT_EVENT) \
50
! ? (x->flags = PF_DONE_EVENT, OK) \
54
! #define SET_PF_WAIT_EVENT(x) \
55
! ( PANICIF (x->flags & (PF_WAIT_EVENT | PF_DONE_EVENT)), \
56
! (x->flags = PF_WAIT_EVENT), 0)
58
! #define CLEAR_PF_DONE_EVENT(x) \
59
! ( PANICIF (!(x->flags & PF_DONE_EVENT)), \
62
struct pthread_select_data {
69
union pthread_wait_data {
70
! pthread_mutex_t * mutex;
71
! pthread_cond_t * cond;
72
! const sigset_t * sigwait; /* Waiting on a signal in sigwait */
74
! short fd; /* Used when thread waiting on fd */
75
! short branch; /* line number, for debugging */
77
struct pthread_select_data * select_data;
83
/* Put PANIC inside an expression that evaluates to non-void type, to
84
make it easier to combine it in expressions. */
85
! #define DO_PANIC() (PANIC (), 0)
86
! #define PANICIF(x) ((x) ? DO_PANIC () : 0)
88
! /* In the thread flag field, we use a series of bit flags. Flags can
89
! * be organized into "groups" of mutually exclusive flags. Other flags
90
! * are unrelated and can be set and cleared with a single bit operation.
93
! #define PF_WAIT_EVENT 0x01
94
! #define PF_DONE_EVENT 0x02
95
! #define PF_EVENT_GROUP 0x03 /* All event bits */
97
! #define PF_CANCEL_STATE 0x04 /* cancellability state */
98
! #define PF_CANCEL_TYPE 0x08 /* cancellability type */
99
! #define PF_THREAD_CANCELLED 0x10 /* thread has been cancelled */
100
! #define PF_RUNNING_TO_CANCEL 0x20 /* Thread is running so it can cancel*/
101
! #define PF_AT_CANCEL_POINT 0x40 /* Thread is at a cancel point */
103
! /* Flag operations */
105
! #define SET_PF_FLAG(x,f) ( (x)->flags |= (f) )
106
! #define TEST_PF_FLAG(x,f) ( (x)->flags & (f) )
107
! #define CLEAR_PF_FLAG(x,f) ( (x)->flags &= ~(f) )
108
! #define CLEAR_PF_GROUP(x,g) ( (x)->flags &= ~(g) )
109
! #define SET_PF_FLAG_IN_GROUP(x,g,f) ( CLEAR_PF_GROUP(x,g),SET_PF_FLAG(x,f))
110
! #define TEST_PF_GROUP(x,g) ( (x)->flags & (g) )
112
! #define SET_PF_DONE_EVENT(x) \
113
! ( !TEST_PF_FLAG(x,PF_DONE_EVENT) \
114
! ? ( TEST_PF_FLAG(x,PF_WAIT_EVENT) \
115
! ? (SET_PF_FLAG_IN_GROUP(x,PF_EVENT_GROUP,PF_DONE_EVENT), OK) \
119
! #define SET_PF_WAIT_EVENT(x) \
120
! ( PANICIF (TEST_PF_GROUP(x,PF_EVENT_GROUP) ), \
121
! SET_PF_FLAG_IN_GROUP(x,PF_EVENT_GROUP,PF_WAIT_EVENT), 0)
123
! #define CLEAR_PF_DONE_EVENT(x) \
124
! ( PANICIF (!TEST_PF_FLAG(x,PF_DONE_EVENT)), \
125
! CLEAR_PF_GROUP(x,PF_EVENT_GROUP) )
127
! #define SET_PF_CANCELLED(x) ( SET_PF_FLAG(x,PF_THREAD_CANCELLED) )
128
! #define TEST_PF_CANCELLED(x) ( TEST_PF_FLAG(x,PF_THREAD_CANCELLED) )
130
! #define SET_PF_RUNNING_TO_CANCEL(x) ( SET_PF_FLAG(x,PF_RUNNING_TO_CANCEL) )
131
! #define CLEAR_PF_RUNNING_TO_CANCEL(x)( CLEAR_PF_FLAG(x,PF_RUNNING_TO_CANCEL) )
132
! #define TEST_PF_RUNNING_TO_CANCEL(x)( TEST_PF_FLAG(x,PF_RUNNING_TO_CANCEL) )
134
! #define SET_PF_AT_CANCEL_POINT(x) ( SET_PF_FLAG(x,PF_AT_CANCEL_POINT) )
135
! #define CLEAR_PF_AT_CANCEL_POINT(x) ( CLEAR_PF_FLAG(x,PF_AT_CANCEL_POINT) )
136
! #define TEST_PF_AT_CANCEL_POINT(x) ( TEST_PF_FLAG(x,PF_AT_CANCEL_POINT) )
138
! #define SET_PF_CANCEL_STATE(x,f) \
139
! ( (f) ? SET_PF_FLAG(x,PF_CANCEL_STATE) : CLEAR_PF_FLAG(x,PF_CANCEL_STATE) )
140
! #define TEST_PF_CANCEL_STATE(x) \
141
! ( (TEST_PF_FLAG(x,PF_CANCEL_STATE)) ? PTHREAD_CANCEL_ENABLE \
142
! : PTHREAD_CANCEL_DISABLE )
144
! #define SET_PF_CANCEL_TYPE(x,f) \
145
! ( (f) ? SET_PF_FLAG(x,PF_CANCEL_TYPE) : CLEAR_PF_FLAG(x,PF_CANCEL_TYPE) )
146
! #define TEST_PF_CANCEL_TYPE(x) \
147
! ( (TEST_PF_FLAG(x,PF_CANCEL_TYPE)) ? PTHREAD_CANCEL_ASYNCHRONOUS \
148
! : PTHREAD_CANCEL_DEFERRED )
150
! /* See if a thread is in a state in which it can be cancelled */
151
! #define TEST_PTHREAD_IS_CANCELLABLE(x) \
152
! ( (TEST_PF_CANCEL_STATE(x) == PTHREAD_CANCEL_ENABLE && TEST_PF_CANCELLED(x)) \
153
! ? ((TEST_PF_CANCEL_TYPE(x) == PTHREAD_CANCEL_ASYNCHRONOUS) \
155
! : TEST_PF_AT_CANCEL_POINT(x)) \
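A self-contained sketch of how the flag and group macros above compose; the struct below is a stand-in for the real struct pthread (only the int flags field matters here):

#include <stdio.h>

#define PF_WAIT_EVENT        0x01
#define PF_DONE_EVENT        0x02
#define PF_EVENT_GROUP       0x03   /* all event bits */
#define PF_THREAD_CANCELLED  0x10   /* an unrelated single-bit flag */

struct sketch_pthread { int flags; };

#define SET_PF_FLAG(x,f)            ((x)->flags |= (f))
#define CLEAR_PF_GROUP(x,g)         ((x)->flags &= ~(g))
#define SET_PF_FLAG_IN_GROUP(x,g,f) (CLEAR_PF_GROUP(x,g), SET_PF_FLAG(x,f))

int main(void)
{
    struct sketch_pthread t = { 0 };

    SET_PF_FLAG_IN_GROUP(&t, PF_EVENT_GROUP, PF_WAIT_EVENT); /* wait set        */
    SET_PF_FLAG_IN_GROUP(&t, PF_EVENT_GROUP, PF_DONE_EVENT); /* wait -> done    */
    SET_PF_FLAG(&t, PF_THREAD_CANCELLED);                    /* independent bit */

    printf("flags = 0x%x\n", t.flags);  /* prints 0x12: DONE_EVENT | CANCELLED */
    return 0;
}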
159
struct pthread_select_data {
166
union pthread_wait_data {
167
! pthread_mutex_t * mutex;
168
! pthread_cond_t * cond;
169
! const sigset_t * sigwait; /* Waiting on a signal in sigwait */
171
! short fd; /* Used when thread waiting on fd */
172
! short branch; /* line number, for debugging */
174
struct pthread_select_data * select_data;
178
#define PTT_USER_THREAD 0x0001
182
struct machdep_pthread machdep_data;
183
! pthread_attr_t attr;
185
/* Signal interface */
187
! sigset_t sigpending;
188
! int sigcount; /* Number of signals pending */
191
! struct timespec wakeup_time;
193
/* Join queue for waiting threads */
194
struct pthread_queue join_queue;
198
* Thread implementations are just multiple queue type implementations,
199
* Below are the various link lists currently necessary
201
#define PTT_USER_THREAD 0x0001
205
struct machdep_pthread machdep_data;
206
! pthread_attr_t attr;
208
/* Signal interface */
210
! sigset_t sigpending;
211
! int sigcount; /* Number of signals pending */
214
! struct timespec wakeup_time;
216
/* Join queue for waiting threads */
217
struct pthread_queue join_queue;
220
* Thread implementations are just multiple queue type implementations,
221
* Below are the various link lists currently necessary
224
* ALL threads, in any state.
225
* Must lock kernel lock before manipulating.
227
! struct pthread * pll;
230
* Standard link list for running threads, mutexes, etc ...
231
* It can't be on both a running link list and a wait queue.
232
* Must lock kernel lock before manipulating.
234
! struct pthread * next;
235
union pthread_wait_data data;
239
* ALL threads, in any state.
240
* Must lock kernel lock before manipulating.
242
! struct pthread * pll;
245
* Standard link list for running threads, mutexes, etc ...
246
* It can't be on both a running link list and a wait queue.
247
* Must lock kernel lock before manipulating.
249
! struct pthread * next;
250
union pthread_wait_data data;
255
* (Note: "priority" is a reserved word in Concurrent C, please
256
* don't use it. --KR)
258
! struct pthread_queue * queue;
259
! enum pthread_state state;
261
! char pthread_priority;
264
* Sleep queue, this is different from the standard link list
265
* because it is possible to be on both (pthread_cond_timedwait();
266
* Must lock sleep mutex before manipulating
268
! struct pthread *sll; /* For sleeping threads */
271
* Data that doesn't need to be locked
272
! * Mostly it's because only the thread owning the data can manipulate it
277
! const void ** specific_data;
278
! int specific_data_count;
280
/* Cleanup handlers Link List */
281
struct pthread_cleanup *cleanup;
285
#else /* not PTHREAD_KERNEL */
287
* (Note: "priority" is a reserved word in Concurrent C, please
288
* don't use it. --KR)
290
! struct pthread_queue * queue;
291
! enum pthread_state state;
292
! enum pthread_state old_state; /* Used when cancelled */
294
! char pthread_priority;
297
* Sleep queue, this is different from the standard link list
298
* because it is possible to be on both (pthread_cond_timedwait();
299
* Must lock sleep mutex before manipulating
301
! struct pthread *sll; /* For sleeping threads */
304
* Data that doesn't need to be locked
305
! * Mostly because only the thread owning the data can manipulate it
310
! const void ** specific_data;
311
! int specific_data_count;
313
/* Cleanup handlers Link List */
314
struct pthread_cleanup *cleanup;
317
#else /* not PTHREAD_KERNEL */
323
! typedef struct pthread * pthread_t;
328
#ifdef PTHREAD_KERNEL
330
! extern struct pthread * pthread_run;
331
! extern struct pthread * pthread_initial;
332
! extern struct pthread * pthread_link_list;
333
extern struct pthread_queue pthread_dead_queue;
334
extern struct pthread_queue pthread_alloc_queue;
336
! extern pthread_attr_t pthread_attr_default;
337
! extern volatile int fork_lock;
338
! extern pthread_size_t pthread_pagesize;
340
! extern sigset_t * uthread_sigmask;
348
! typedef struct pthread *pthread_t;
353
#ifdef PTHREAD_KERNEL
355
! extern struct pthread * pthread_run;
356
! extern struct pthread * pthread_initial;
357
! extern struct pthread * pthread_link_list;
358
extern struct pthread_queue pthread_dead_queue;
359
extern struct pthread_queue pthread_alloc_queue;
361
! extern pthread_attr_t pthread_attr_default;
362
! extern volatile int fork_lock;
363
! extern pthread_size_t pthread_pagesize;
365
! extern sigset_t * uthread_sigmask;
367
! /* Kernel global functions */
368
! extern void pthread_sched_prevent(void);
369
! extern void pthread_sched_resume(void);
370
! extern int __pthread_is_valid( pthread_t );
371
! extern void pthread_cancel_internal( int freelocks );
378
#if defined(DCE_COMPAT)
380
! typedef void * (*pthread_startroutine_t)(void *)
381
! typedef void * pthread_addr_t
383
! int pthread_create __P((pthread_t *, pthread_attr_t,
384
! pthread_startroutine_t,
386
! void pthread_exit __P((pthread_addr_t));
387
! int pthread_join __P((pthread_t, pthread_addr_t *));
391
! void pthread_init __P((void));
392
! int pthread_create __P((pthread_t *,
393
! const pthread_attr_t *,
394
! void * (*start_routine)(void *),
396
! void pthread_exit __P((void *));
397
! pthread_t pthread_self __P((void));
398
! int pthread_equal __P((pthread_t, pthread_t));
399
! int pthread_join __P((pthread_t, void **));
400
! int pthread_detach __P((pthread_t));
401
! void pthread_yield __P((void));
403
! int pthread_setschedparam __P((pthread_t pthread, int policy,
404
! struct sched_param * param));
405
! int pthread_getschedparam __P((pthread_t pthread, int * policy,
406
! struct sched_param * param));
408
! int pthread_kill __P((struct pthread *, int));
409
! int pthread_signal __P((int, void (*)(int)));
413
#if defined(PTHREAD_KERNEL)
415
/* Not valid, but I can't spell so this will be caught at compile time */
416
! #define pthread_yeild(notvalid)
422
#if defined(DCE_COMPAT)
424
! typedef void * (*pthread_startroutine_t)(void *);
425
! typedef void * pthread_addr_t;
427
! int pthread_create __P((pthread_t *, pthread_attr_t,
428
! pthread_startroutine_t, pthread_addr_t));
429
! void pthread_exit __P((pthread_addr_t));
430
! int pthread_join __P((pthread_t, pthread_addr_t *));
434
! void pthread_init __P((void));
435
! int pthread_create __P((pthread_t *, const pthread_attr_t *,
436
! void * (*start_routine)(void *), void *));
437
! void pthread_exit __P((void *));
438
! pthread_t pthread_self __P((void));
439
! int pthread_equal __P((pthread_t, pthread_t));
440
! int pthread_join __P((pthread_t, void **));
441
! int pthread_detach __P((pthread_t));
442
! void pthread_yield __P((void));
444
! int pthread_setschedparam __P((pthread_t pthread, int policy,
445
! struct sched_param * param));
446
! int pthread_getschedparam __P((pthread_t pthread, int * policy,
447
! struct sched_param * param));
449
! int pthread_kill __P((struct pthread *, int));
450
! int pthread_signal __P((int, void (*)(int)));
452
! int pthread_cancel __P(( pthread_t pthread ));
453
! int pthread_setcancelstate __P(( int state, int *oldstate ));
454
! int pthread_setcanceltype __P(( int type, int *oldtype ));
455
! void pthread_testcancel __P(( void ));
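A hedged end-to-end sketch of the cancellation interface declared above, written against this header's own names (PTHREAD_CANCELLED is the (void *)1 exit status defined near the top of the file):

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {
        /* ... one unit of work ... */
        pthread_testcancel();   /* deferred cancels are honoured here */
    }
    return NULL;                /* not reached */
}

int main(void)
{
    pthread_t t;
    void *status;

    pthread_create(&t, NULL, worker, NULL);
    pthread_cancel(t);
    pthread_join(t, &status);

    if (status == PTHREAD_CANCELLED)    /* (void *)1 in this implementation */
        printf("worker was cancelled\n");
    return 0;
}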
459
#if defined(PTHREAD_KERNEL)
461
/* Not valid, but I can't spell so this will be caught at compile time */
462
! #define pthread_yeild(notvalid)
466
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/include/signal.h,v
retrieving revision 1.1.1.1
467
diff -c -r1.1.1.1 signal.h
468
*** signal.h 1995/12/25 03:03:09 1.1.1.1
469
--- signal.h 1996/09/26 21:46:04
475
int raise __P((int));
476
+ __sighandler_t signal __P((int __sig, __sighandler_t));
480
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/include/pthread/kernel.h,v
retrieving revision 1.1.1.1
481
diff -c -r1.1.1.1 kernel.h
482
*** kernel.h 1994/12/13 07:09:01 1.1.1.1
483
--- kernel.h 1996/10/02 19:08:41
487
#if defined(PTHREAD_KERNEL)
489
! #define PANIC() abort()
491
/* Time each rr thread gets */
492
#define PTHREAD_RR_TIMEOUT 100000000
495
#if defined(PTHREAD_KERNEL)
498
! #include <assert.h>
499
! #define PANIC() panic_kernel( __FILE__, __LINE__, __ASSERT_FUNCTION )
501
! #define PANIC() panic_kernel( __FILE__, __LINE__, (const char *)0 )
505
/* Time each rr thread gets */
506
#define PTHREAD_RR_TIMEOUT 100000000
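panic.c itself is not included in this diff; a hypothetical minimal implementation consistent with the PANIC() call sites above (its behaviour here is an assumption, only the name and argument list come from the macro) might be:

#include <stdio.h>
#include <stdlib.h>

void panic_kernel(const char *file, int line, const char *func)
{
    fprintf(stderr, "pthreads kernel panic at %s:%d%s%s\n",
            file, line, func ? ", function " : "", func ? func : "");
    abort();
}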
507
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/syscall-i386-linux-1.0.S,v
retrieving revision 1.1.1.1
508
diff -c -r1.1.1.1 syscall-i386-linux-1.0.S
509
*** syscall-i386-linux-1.0.S 1995/09/27 04:38:55 1.1.1.1
510
--- syscall-i386-linux-1.0.S 1996/06/04 19:20:17
514
/* =========================================================================
516
! * fork 2 socketcall 102
523
/* =========================================================================
525
! * fork 2 fstatfs 100
526
! * read 3 socketcall 102
551
/* ==========================================================================
552
! * machdep_sys_fstat()
560
/* ==========================================================================
561
* machdep_sys_ftruncate()
565
/* ==========================================================================
566
! * machdep_sys_lstat()
574
+ /* ==========================================================================
575
+ * machdep_sys_fstatfs()
580
/* ==========================================================================
581
* machdep_sys_ftruncate()
582
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/linux-1.0/socket.h,v
retrieving revision 1.1.1.1
583
diff -c -r1.1.1.1 socket.h
584
*** socket.h 1995/12/26 02:28:03 1.1.1.1
585
--- socket.h 1996/09/27 18:12:45
591
! /* #include <asm/socket.h> /* arch-dependent defines */
592
#include <linux/sockios.h> /* the SIOCxxx I/O controls */
593
#include <pthread/posix.h>
599
! /* #include <asm/socket.h> arch-dependent defines */
600
#include <linux/sockios.h> /* the SIOCxxx I/O controls */
601
#include <pthread/posix.h>
606
int connect __P((int, const struct sockaddr *, int));
607
int listen __P((int, int));
608
int socket __P((int, int, int));
610
+ int getsockopt __P ((int __s, int __level, int __optname,
611
+ void *__optval, int *__optlen));
612
+ int setsockopt __P ((int __s, int __level, int __optname,
613
+ __const void *__optval, int optlen));
614
+ int getsockname __P ((int __sockfd, struct sockaddr *__addr,
616
+ int getpeername __P ((int __sockfd, struct sockaddr *__peer,
618
+ ssize_t send __P ((int __sockfd, __const void *__buff, size_t __len, int __flags));
619
+ ssize_t recv __P ((int __sockfd, void *__buff, size_t __len, int __flags));
620
+ ssize_t sendto __P ((int __sockfd, __const void *__buff, size_t __len,
621
+ int __flags, __const struct sockaddr *__to,
623
+ ssize_t recvfrom __P ((int __sockfd, void *__buff, size_t __len,
624
+ int __flags, struct sockaddr *__from,
626
+ extern ssize_t sendmsg __P ((int __fd, __const struct msghdr *__message,
628
+ extern ssize_t recvmsg __P ((int __fd, struct msghdr *__message,
630
+ int shutdown __P ((int __sockfd, int __how));
634
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/linux-1.0/timers.h,v
retrieving revision 1.1.1.1
635
diff -c -r1.1.1.1 timers.h
636
*** timers.h 1996/03/05 08:28:36 1.1.1.1
637
--- timers.h 1996/05/25 21:30:08
641
#include <sys/types.h>
644
+ #ifndef _LINUX_TIME_H
649
+ #endif /* _LINUX_TIME_H */
651
#define TIMEVAL_TO_TIMESPEC(tv, ts) { \
652
(ts)->tv_sec = (tv)->tv_sec; \
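Only the first line of the macro survives in the context above; for reference, the conventional BSD-style form of the conversion pair (a sketch, not necessarily what timers.h contains verbatim) is:

#define TIMEVAL_TO_TIMESPEC(tv, ts) {           \
    (ts)->tv_sec  = (tv)->tv_sec;               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;       \
}
#define TIMESPEC_TO_TIMEVAL(tv, ts) {           \
    (tv)->tv_sec  = (ts)->tv_sec;               \
    (tv)->tv_usec = (ts)->tv_nsec / 1000;       \
}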
653
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/getprotoent.c,v
retrieving revision 1.1.1.1
654
diff -c -r1.1.1.1 getprotoent.c
655
*** getprotoent.c 1996/02/09 05:39:41 1.1.1.1
656
--- getprotoent.c 1996/05/27 01:11:27
666
pthread_mutex_unlock(&proto_file_lock);
675
pthread_mutex_unlock(&proto_file_lock);
676
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/proto_internal.c,v
retrieving revision 1.1.1.1
677
diff -c -r1.1.1.1 proto_internal.c
678
*** proto_internal.c 1996/02/09 05:39:49 1.1.1.1
679
--- proto_internal.c 1996/06/04 16:25:57
682
static int init_status;
684
/* Performs global initialization. */
685
! char *_proto_init()
690
static int init_status;
692
/* Performs global initialization. */
700
init_status = pthread_key_create(&key, free);
704
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/res_internal.c,v
retrieving revision 1.1.1.1
705
diff -c -r1.1.1.1 res_internal.c
706
*** res_internal.c 1996/02/09 05:39:53 1.1.1.1
707
--- res_internal.c 1996/09/25 23:31:11
714
+ bp += strlen(bp) + 1;
718
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/GNUmakefile.inc,v
retrieving revision 1.1.1.1
719
diff -c -r1.1.1.1 GNUmakefile.inc
720
*** GNUmakefile.inc 1995/08/30 22:27:04 1.1.1.1
721
--- GNUmakefile.inc 1996/10/02 19:04:29
724
syscall.S pthread_join.c pthread_detach.c pthread_once.c sleep.c \
725
specific.c process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
726
pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
727
! dump_state.c pthread_kill.c stat.c readv.c writev.c condattr.c $(SRCS)
729
ifeq ($(HAVE_SYSCALL_TEMPLATE),yes)
730
SYSCALL_FILTER_RULE= for s in $(AVAILABLE_SYSCALLS) ; do \
732
syscall.S pthread_join.c pthread_detach.c pthread_once.c sleep.c \
733
specific.c process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
734
pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
735
! dump_state.c pthread_kill.c stat.c readv.c writev.c condattr.c \
736
! pthread_cancel.c panic.c $(SRCS)
738
ifeq ($(HAVE_SYSCALL_TEMPLATE),yes)
739
SYSCALL_FILTER_RULE= for s in $(AVAILABLE_SYSCALLS) ; do \
740
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/Makefile.inc,v
retrieving revision 1.1.1.1
741
diff -c -r1.1.1.1 Makefile.inc
742
*** Makefile.inc 1995/08/22 22:09:07 1.1.1.1
743
--- Makefile.inc 1996/10/02 19:04:38
746
pthread_join.c pthread_detach.c pthread_once.c sleep.c specific.c \
747
process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
748
pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
749
! dump_state.c pthread_kill.c condattr.c
751
.if $(HAVE_SYSCALL_TEMPLATE) == yes
754
pthread_join.c pthread_detach.c pthread_once.c sleep.c specific.c \
755
process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \
756
pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \
757
! dump_state.c pthread_kill.c condattr.c pthread_cancel.c panic.c
759
.if $(HAVE_SYSCALL_TEMPLATE) == yes
761
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/cond.c,v
retrieving revision 1.1.1.1
762
diff -c -r1.1.1.1 cond.c
763
*** cond.c 1996/03/05 08:29:12 1.1.1.1
764
--- cond.c 1996/10/03 18:19:04
768
pthread_queue_enq(&cond->c_queue, pthread_run);
769
pthread_mutex_unlock(mutex);
771
+ pthread_run->data.mutex = mutex;
773
SET_PF_WAIT_EVENT(pthread_run);
774
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
775
/* Reschedule will unlock pthread_run */
776
pthread_resched_resume(PS_COND_WAIT);
777
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
778
CLEAR_PF_DONE_EVENT(pthread_run);
780
+ pthread_run->data.mutex = NULL;
782
rval = pthread_mutex_lock(mutex);
788
pthread_mutex_unlock(mutex);
789
mutex->m_data.m_count = 1;
791
+ pthread_run->data.mutex = mutex;
793
SET_PF_WAIT_EVENT(pthread_run);
794
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
795
/* Reschedule will unlock pthread_run */
796
pthread_resched_resume(PS_COND_WAIT);
797
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
798
CLEAR_PF_DONE_EVENT(pthread_run);
800
+ pthread_run->data.mutex = NULL;
802
rval = pthread_mutex_lock(mutex);
803
mutex->m_data.m_count = count;
808
SET_PF_WAIT_EVENT(pthread_run);
809
pthread_mutex_unlock(mutex);
811
+ pthread_run->data.mutex = mutex;
813
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
814
/* Reschedule will unlock pthread_run */
815
pthread_resched_resume(PS_COND_WAIT);
816
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
818
+ pthread_run->data.mutex = NULL;
820
/* Remove ourselves from sleep queue. If we fail then we timed out */
821
if (sleep_cancel(pthread_run) == NOTOK) {
825
SET_PF_WAIT_EVENT(pthread_run);
826
pthread_mutex_unlock(mutex);
828
+ pthread_run->data.mutex = mutex;
830
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
831
/* Reschedule will unlock pthread_run */
832
pthread_resched_resume(PS_COND_WAIT);
833
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
835
+ pthread_run->data.mutex = NULL;
837
/* Remove ourselves from sleep queue. If we fail then we timed out */
838
if (sleep_cancel(pthread_run) == NOTOK) {
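The cond.c hunks above all add the same bracketing pattern around a wait: remember the mutex so a cancel can re-acquire it, mark the thread as sitting at a cancellation point while it is rescheduled, then clear the mark. A stripped-down sketch of that shape (stand-in type and flag value; the real code operates on pthread_run with the PF_* macros from pthread.h):

struct sketch_waiter { int flags; void *wait_mutex; };

#define SKETCH_AT_CANCEL_POINT 0x40   /* stands in for PF_AT_CANCEL_POINT */

static void cond_wait_shape(struct sketch_waiter *self, void *mutex)
{
    self->wait_mutex = mutex;                  /* cancellation can re-lock it */
    self->flags |= SKETCH_AT_CANCEL_POINT;     /* now cancellable */
    /* ... pthread_resched_resume(PS_COND_WAIT) blocks here ... */
    self->flags &= ~SKETCH_AT_CANCEL_POINT;    /* no longer at a cancel point */
    self->wait_mutex = (void *)0;
}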
839
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/fd.c,v
retrieving revision 1.1.1.1
840
diff -c -r1.1.1.1 fd.c
841
*** fd.c 1996/02/09 02:54:19 1.1.1.1
842
--- fd.c 1996/10/03 01:33:03
846
#include <sys/types.h>
847
#include <sys/stat.h>
849
+ #include <sys/ioctl.h>
853
+ #include <varargs.h>
857
#include <pthread/posix.h>
861
static const int dtablecount = 4096/sizeof(struct fd_table_entry);
864
+ static int fd_get_pthread_fd_from_kernel_fd( int );
866
/* ==========================================================================
867
* Allocate dtablecount entries at once and populate the fd_table.
875
+ /*----------------------------------------------------------------------
876
+ * Function: fd_get_pthread_fd_from_kernel_fd
877
+ * Purpose: get the fd_table index of a kernel fd
878
+ * Args: fd = kernel fd to convert
879
+ * Returns: fd_table index, -1 if not found
881
+ *----------------------------------------------------------------------*/
883
+ fd_get_pthread_fd_from_kernel_fd( int kfd )
887
+ /* This is *SICK*, but unless there is a faster way to
888
+ * turn a kernel fd into an fd_table index, this has to do.
890
+ for( j=0; j < dtablesize; j++ ) {
892
+ fd_table[j]->type != FD_NT &&
893
+ fd_table[j]->type != FD_NIU &&
894
+ fd_table[j]->fd.i == kfd ) {
899
+ /* Not listed by fd; check for kernel fd == pthread fd */
900
+ if( fd_table[kfd] == NULL || fd_table[kfd]->type == FD_NT ) {
901
+ /* Assume that the kernel fd is the same */
905
+ return NOTOK; /* Not found */
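A self-contained sketch of the reverse lookup above; the table and entry types are stand-ins for the real fd_table machinery:

#include <stddef.h>

#define SKETCH_DTABLESIZE 64
#define SKETCH_NOTOK      (-1)

enum sketch_fd_type { FD_NT, FD_NIU, FD_IN_USE };
struct sketch_fd_entry { enum sketch_fd_type type; int kernel_fd; };

static struct sketch_fd_entry *sketch_fd_table[SKETCH_DTABLESIZE];

static int sketch_fd_from_kernel_fd(int kfd)
{
    int j;

    /* Linear scan: map a kernel fd back to its fd_table slot. */
    for (j = 0; j < SKETCH_DTABLESIZE; j++) {
        if (sketch_fd_table[j] &&
            sketch_fd_table[j]->type != FD_NT &&
            sketch_fd_table[j]->type != FD_NIU &&
            sketch_fd_table[j]->kernel_fd == kfd)
            return j;
    }

    /* Not listed by fd: fall back to assuming kernel fd == pthread fd. */
    if (kfd >= 0 && kfd < SKETCH_DTABLESIZE &&
        (sketch_fd_table[kfd] == NULL || sketch_fd_table[kfd]->type == FD_NT))
        return kfd;

    return SKETCH_NOTOK;
}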
908
/* ==========================================================================
909
* fd_basic_basic_unlock()
914
switch (fd_table[fd]->type) {
916
/* If not in use return EBADF error */
926
if (fd_table[fd]->type == FD_NIU) {
937
+ /*----------------------------------------------------------------------
938
+ * Function: fd_unlock_for_cancel
939
+ * Purpose: Unlock all fd locks held prior to being cancelled
944
+ * Assumes the kernel is locked on entry
945
+ *----------------------------------------------------------------------*/
947
+ fd_unlock_for_cancel( void )
950
+ struct pthread_select_data *data;
951
+ int rdlk, wrlk, lktype;
954
+ /* What we do depends on the previous state of the thread */
955
+ switch( pthread_run->old_state ) {
958
+ case PS_SLEEP_WAIT:
965
+ break; /* Nothing to do */
968
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP );
969
+ /* Must reacquire the mutex according to the standard */
970
+ if( pthread_run->data.mutex == NULL ) {
973
+ pthread_mutex_lock( pthread_run->data.mutex );
977
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
978
+ /* Free the lock on the fd being used */
979
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
980
+ if( fd == NOTOK ) {
981
+ PANIC(); /* Can't find fd */
983
+ fd_unlock( fd, FD_READ );
986
+ case PS_FDW_WAIT: /* Waiting on i/o */
987
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
988
+ /* Free the lock on the fd being used */
989
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
990
+ if( fd == NOTOK ) {
991
+ PANIC(); /* Can't find fd */
993
+ fd_unlock( fd, FD_WRITE );
996
+ case PS_SELECT_WAIT:
997
+ data = pthread_run->data.select_data;
999
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
1001
+ for( i = 0; i < data->nfds; i++) {
1002
+ rdlk =(FD_ISSET(i,&data->readfds)
1003
+ || FD_ISSET(i,&data->exceptfds));
1004
+ wrlk = FD_ISSET(i, &data->writefds);
1005
+ lktype = rdlk ? (wrlk ? FD_RDWR : FD_READ) : FD_WRITE;
1007
+ if( ! (rdlk || wrlk) )
1008
+ continue; /* No locks, no unlock */
1010
+ if( (fd = fd_get_pthread_fd_from_kernel_fd( i )) == NOTOK ) {
1011
+ PANIC(); /* Can't find fd */
1014
+ fd_unlock( fd, lktype );
1018
+ case PS_MUTEX_WAIT:
1019
+ PANIC(); /* Should never cancel a mutex wait */
1022
+ PANIC(); /* Unknown thread status */
1026
/* ==========================================================================
1032
ret = fd_table[fd]->ops->read(fd_table[fd]->fd,
1033
fd_table[fd]->flags, buf, nbytes, timeout);
1034
fd_unlock(fd, FD_READ);
1045
ret = fd_table[fd]->ops->readv(fd_table[fd]->fd,
1046
fd_table[fd]->flags, iov, iovcnt, timeout);
1047
fd_unlock(fd, FD_READ);
1058
ret = fd_table[fd]->ops->write(fd_table[fd]->fd,
1059
fd_table[fd]->flags, buf, nbytes, timeout);
1060
fd_unlock(fd, FD_WRITE);
1071
ret = fd_table[fd]->ops->writev(fd_table[fd]->fd,
1072
fd_table[fd]->flags, iov, iovcnt, timeout);
1073
fd_unlock(fd, FD_WRITE);
1083
union fd_data realfd;
1086
/* Need to lock the newfd by hand */
1087
! if (fd < dtablesize) {
1088
! pthread_mutex_lock(&fd_table_mutex);
1089
! if (fd_table[fd]) {
1090
! pthread_mutex_unlock(&fd_table_mutex);
1091
! mutex = &(fd_table[fd]->mutex);
1092
! pthread_mutex_lock(mutex);
1095
! * XXX Gross hack ... because of fork(), any fd closed by the
1096
! * parent should not change the fd of the child, unless it owns it.
1098
! switch(fd_table[fd]->type) {
1100
! pthread_mutex_unlock(mutex);
1105
! * If it's not tested then the only valid possibility is it's
1108
! ret = machdep_sys_close(fd);
1109
! fd_table[fd]->type = FD_NIU;
1110
! pthread_mutex_unlock(mutex);
1112
! case FD_TEST_FULL_DUPLEX:
1113
! case FD_TEST_HALF_DUPLEX:
1114
realfd = fd_table[fd]->fd;
1115
flags = fd_table[fd]->flags;
1116
if ((entry = fd_free(fd)) == NULL) {
1117
! ret = fd_table[fd]->ops->close(realfd, flags);
1119
! /* There can't be any others waiting for fd. */
1120
pthread_mutex_unlock(&entry->mutex);
1121
/* Note: entry->mutex = mutex */
1122
- mutex = &(fd_table[fd]->mutex);
1124
pthread_mutex_unlock(mutex);
1127
- ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL);
1129
- realfd = fd_table[fd]->fd;
1130
- flags = fd_table[fd]->flags;
1131
- pthread_mutex_unlock(mutex);
1132
- if ((entry = fd_free(fd)) == NULL) {
1133
- ret = fd_table[fd]->ops->close(realfd, flags);
1135
- fd_basic_basic_unlock(entry, FD_RDWR);
1136
- pthread_mutex_unlock(&entry->mutex);
1137
- /* Note: entry->mutex = mutex */
1139
- fd_unlock(fd, FD_RDWR);
1141
- pthread_mutex_unlock(mutex);
1146
! /* Don't bother creating a table entry */
1147
! pthread_mutex_unlock(&fd_table_mutex);
1148
! ret = machdep_sys_close(fd);
1155
/* ==========================================================================
1158
* Might need to do more than just what's below.
1160
static inline void fd_basic_dup(int fd, int newfd)
1163
union fd_data realfd;
1166
+ if( fd < 0 || fd >= dtablesize ) {
1171
/* Need to lock the newfd by hand */
1172
! pthread_mutex_lock(&fd_table_mutex);
1173
! if (fd_table[fd]) {
1174
! pthread_mutex_unlock(&fd_table_mutex);
1175
! mutex = &(fd_table[fd]->mutex);
1176
! pthread_mutex_lock(mutex);
1179
! * XXX Gross hack ... because of fork(), any fd closed by the
1180
! * parent should not change the fd of the child, unless it owns it.
1182
! switch(fd_table[fd]->type) {
1184
! pthread_mutex_unlock(mutex);
1189
! * If it's not tested then the only valid possibility is it's
1192
! ret = machdep_sys_close(fd);
1193
! fd_table[fd]->type = FD_NIU;
1194
! pthread_mutex_unlock(mutex);
1196
! case FD_TEST_FULL_DUPLEX:
1197
! case FD_TEST_HALF_DUPLEX:
1198
! realfd = fd_table[fd]->fd;
1199
! flags = fd_table[fd]->flags;
1200
! if ((entry = fd_free(fd)) == NULL) {
1201
! ret = fd_table[fd]->ops->close(realfd, flags);
1203
! /* There can't be any others waiting for fd. */
1204
! pthread_mutex_unlock(&entry->mutex);
1205
! /* Note: entry->mutex = mutex */
1206
! mutex = &(fd_table[fd]->mutex);
1208
! pthread_mutex_unlock(mutex);
1211
! ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL);
1213
realfd = fd_table[fd]->fd;
1214
flags = fd_table[fd]->flags;
1215
+ pthread_mutex_unlock(mutex);
1216
if ((entry = fd_free(fd)) == NULL) {
1217
! ret = fd_table[fd]->ops->close(realfd, flags);
1219
! fd_basic_basic_unlock(entry, FD_RDWR);
1220
pthread_mutex_unlock(&entry->mutex);
1221
/* Note: entry->mutex = mutex */
1223
+ fd_unlock(fd, FD_RDWR);
1225
pthread_mutex_unlock(mutex);
1230
! /* Don't bother creating a table entry */
1231
! pthread_mutex_unlock(&fd_table_mutex);
1232
! ret = machdep_sys_close(fd);
1243
/* ==========================================================================
1246
* Might need to do more than just what's below.
1248
+ * This is a MAJOR guess!! I don't know if the mutex unlock is valid
1249
+ * in the BIG picture. But it seems to be needed to avoid deadlocking
1250
+ * with ourselves when we try to close the duped file descriptor.
1252
static inline void fd_basic_dup(int fd, int newfd)
1257
fd_table[fd]->next = fd_table[newfd];
1258
fd_table[newfd] = fd_table[fd];
1259
fd_table[fd]->count++;
1260
+ pthread_mutex_unlock(&fd_table[newfd]->next->mutex);
1264
/* ==========================================================================
1267
* ala select()... --SNL
1270
! ioctl(int fd, unsigned long request, caddr_t arg)
1274
if (fd < 0 || fd >= dtablesize)
1277
* ala select()... --SNL
1280
! ioctl(int fd, int request, ...)
1283
+ pthread_va_list ap;
1286
+ va_start( ap, request ); /* Get the arg */
1287
+ arg = va_arg(ap,caddr_t);
1290
if (fd < 0 || fd >= dtablesize)
1295
ret = machdep_sys_ioctl(fd, request, arg);
1296
else if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1297
ret = machdep_sys_ioctl(fd_table[fd]->fd.i, request, arg);
1298
+ if( ret == 0 && request == FIONBIO ) {
1299
+ /* Properly set NONBLOCK flag */
1300
+ int v = *(int *)arg;
1302
+ fd_table[fd]->flags |= __FD_NONBLOCK;
1304
+ fd_table[fd]->flags &= ~__FD_NONBLOCK;
1306
fd_unlock(fd, FD_RDWR);
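The ioctl() change above turns the fixed three-argument wrapper into a variadic one and patches the __FD_NONBLOCK flag when FIONBIO succeeds. A simplified sketch of the variadic argument handling (locking and the real system call are elided; <stdarg.h> is used here where the diff includes <varargs.h>):

#include <stdarg.h>

typedef char *sketch_caddr_t;   /* stand-in for caddr_t */

int ioctl_sketch(int fd, int request, ...)
{
    va_list ap;
    sketch_caddr_t arg;

    va_start(ap, request);               /* fetch the single optional argument */
    arg = va_arg(ap, sketch_caddr_t);
    va_end(ap);

    /* ... hand (fd, request, arg) to the underlying system call and, when
     * request == FIONBIO and it succeeded, set or clear __FD_NONBLOCK in
     * the table entry's flags according to *(int *)arg ... */
    (void)fd;
    (void)arg;
    return 0;
}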
1309
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/fd_kern.c,v
retrieving revision 1.1.1.1
1310
diff -c -r1.1.1.1 fd_kern.c
1311
*** fd_kern.c 1996/02/12 00:58:30 1.1.1.1
1312
--- fd_kern.c 1996/10/03 01:54:15
1317
if ((count = machdep_sys_select(dtablesize, &fd_set_read,
1318
! &fd_set_write, NULL, &__fd_kern_poll_timeout)) < OK) {
1319
if (count == -EINTR) {
1325
if ((count = machdep_sys_select(dtablesize, &fd_set_read,
1326
! &fd_set_write, &fd_set_except, &__fd_kern_poll_timeout)) < OK) {
1327
if (count == -EINTR) {
1333
for (pthread = fd_wait_select.q_next; count && pthread; ) {
1336
for (i = 0; i < pthread->data.select_data->nfds; i++) {
1339
! if ((FD_ISSET(i, &pthread->data.select_data->exceptfds) &&
1340
! ! FD_ISSET(i, &fd_set_except))) {
1341
! FD_CLR(i, &pthread->data.select_data->exceptfds);
1345
! if ((FD_ISSET(i, &pthread->data.select_data->writefds) &&
1346
! ! FD_ISSET(i, &fd_set_write))) {
1347
! FD_CLR(i, &pthread->data.select_data->writefds);
1351
! if ((FD_ISSET(i, &pthread->data.select_data->readfds) &&
1352
! ! FD_ISSET(i, &fd_set_read))) {
1353
! FD_CLR(i, &pthread->data.select_data->readfds);
1364
pthread = pthread->next;
1365
pthread_queue_remove(&fd_wait_select, deq);
1368
for (pthread = fd_wait_select.q_next; count && pthread; ) {
1370
+ fd_set tmp_readfds, tmp_writefds, tmp_exceptfds;
1372
+ memcpy(&tmp_readfds, &pthread->data.select_data->readfds,
1374
+ memcpy(&tmp_writefds, &pthread->data.select_data->writefds,
1376
+ memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds,
1379
for (i = 0; i < pthread->data.select_data->nfds; i++) {
1382
! if( (FD_ISSET(i, &tmp_exceptfds)) ) {
1383
! if( FD_ISSET(i, &fd_set_except) ) {
1384
! count_dec++; /* got a hit */
1386
! FD_CLR(i, &tmp_exceptfds);
1390
! if( (FD_ISSET(i, &tmp_writefds)) ) {
1391
! if( FD_ISSET(i, &fd_set_write) ) {
1392
! count_dec++; /* got a hit */
1394
! FD_CLR(i, &tmp_writefds);
1398
! if( (FD_ISSET(i, &tmp_readfds)) ) {
1399
! if( FD_ISSET(i, &fd_set_read) ) {
1400
! count_dec++; /* got a hit */
1402
! FD_CLR(i, &tmp_readfds);
1413
+ /* Update the thread's saved select data fd sets */
1414
+ memcpy(&pthread->data.select_data->readfds, &tmp_readfds,
1416
+ memcpy(&pthread->data.select_data->writefds, &tmp_writefds,
1418
+ memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds,
1422
pthread = pthread->next;
1423
pthread_queue_remove(&fd_wait_select, deq);
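The fd_kern.c rewrite above stops editing a waiter's saved fd_sets in place; it copies them into temporaries, keeps only descriptors the kernel reported ready, counts the hits, and writes the pruned sets back. A compact sketch of that bookkeeping for one set:

#include <string.h>
#include <sys/select.h>

/* Intersect a waiter's saved set with the kernel's ready set; return hits. */
static int sketch_intersect_ready(fd_set *saved, fd_set *kernel_ready, int nfds)
{
    fd_set tmp;
    int i, hits = 0;

    memcpy(&tmp, saved, sizeof(fd_set));
    for (i = 0; i < nfds; i++) {
        if (FD_ISSET(i, &tmp)) {
            if (FD_ISSET(i, kernel_ready))
                hits++;              /* got a hit */
            else
                FD_CLR(i, &tmp);     /* not ready: drop it from the result */
        }
    }
    memcpy(saved, &tmp, sizeof(fd_set));   /* write the pruned set back */
    return hits;
}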
1428
while ((count = machdep_sys_select(dtablesize, &fd_set_read,
1429
! &fd_set_write, NULL, &__fd_kern_wait_timeout)) < OK) {
1430
if (count == -EINTR) {
1436
while ((count = machdep_sys_select(dtablesize, &fd_set_read,
1437
! &fd_set_write, &fd_set_except, &__fd_kern_wait_timeout)) < OK) {
1438
if (count == -EINTR) {
1444
for (pthread = fd_wait_select.q_next; count && pthread; ) {
1447
for (i = 0; i < pthread->data.select_data->nfds; i++) {
1450
! if ((FD_ISSET(i, &pthread->data.select_data->exceptfds) &&
1451
! ! FD_ISSET(i, &fd_set_except))) {
1452
! FD_CLR(i, &pthread->data.select_data->exceptfds);
1456
! if ((FD_ISSET(i, &pthread->data.select_data->writefds) &&
1457
! ! FD_ISSET(i, &fd_set_write))) {
1458
! FD_CLR(i, &pthread->data.select_data->writefds);
1462
! if ((FD_ISSET(i, &pthread->data.select_data->readfds) &&
1463
! ! FD_ISSET(i, &fd_set_read))) {
1464
! FD_CLR(i, &pthread->data.select_data->readfds);
1475
pthread = pthread->next;
1476
pthread_queue_remove(&fd_wait_select, deq);
1479
for (pthread = fd_wait_select.q_next; count && pthread; ) {
1481
+ fd_set tmp_readfds, tmp_writefds, tmp_exceptfds;
1483
+ memcpy(&tmp_readfds, &pthread->data.select_data->readfds,
1485
+ memcpy(&tmp_writefds, &pthread->data.select_data->writefds,
1487
+ memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds,
1490
for (i = 0; i < pthread->data.select_data->nfds; i++) {
1493
! if( (FD_ISSET(i, &tmp_exceptfds)) ) {
1494
! if( FD_ISSET(i, &fd_set_except) ) {
1495
! count_dec++; /* got a hit */
1497
! FD_CLR(i, &tmp_exceptfds);
1501
! if( (FD_ISSET(i, &tmp_writefds)) ) {
1502
! if( FD_ISSET(i, &fd_set_write) ) {
1503
! count_dec++; /* got a hit */
1505
! FD_CLR(i, &tmp_writefds);
1509
! if( (FD_ISSET(i, &tmp_readfds)) ) {
1510
! if( FD_ISSET(i, &fd_set_read) ) {
1511
! count_dec++; /* got a hit */
1513
! FD_CLR(i, &tmp_readfds);
1523
+ /* Update the thread's saved select data fd sets */
1524
+ memcpy(&pthread->data.select_data->readfds, &tmp_readfds,
1526
+ memcpy(&pthread->data.select_data->writefds, &tmp_writefds,
1528
+ memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds,
1532
pthread = pthread->next;
1533
pthread_queue_remove(&fd_wait_select, deq);
1536
machdep_gettimeofday(&current_time);
1537
sleep_schedule(&current_time, timeout);
1539
pthread_resched_resume(PS_FDR_WAIT);
1542
pthread_sched_prevent();
1543
if (sleep_cancel(pthread_run) == NOTOK) {
1544
CLEAR_PF_DONE_EVENT(pthread_run);
1545
pthread_sched_resume();
1546
- SET_ERRNO(ETIMEDOUT);
1550
pthread_sched_resume();
1552
pthread_resched_resume(PS_FDR_WAIT);
1554
CLEAR_PF_DONE_EVENT(pthread_run);
1562
machdep_gettimeofday(&current_time);
1563
sleep_schedule(&current_time, timeout);
1565
+ SET_PF_AT_CANCEL_POINT(pthread_run);
1566
pthread_resched_resume(PS_FDR_WAIT);
1567
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
1570
pthread_sched_prevent();
1571
if (sleep_cancel(pthread_run) == NOTOK) {
1572
CLEAR_PF_DONE_EVENT(pthread_run);
1573
pthread_sched_resume();
1577
pthread_sched_resume();
1579
+ SET_PF_AT_CANCEL_POINT(pthread_run);
1580
pthread_resched_resume(PS_FDR_WAIT);
1581
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
1583
CLEAR_PF_DONE_EVENT(pthread_run);
1590
if (sleep_cancel(pthread_run) == NOTOK) {
1591
CLEAR_PF_DONE_EVENT(pthread_run);
1592
pthread_sched_resume();
1593
- SET_ERRNO(ETIMEDOUT);
1601
CLEAR_PF_DONE_EVENT(pthread_run);
1611
machdep_gettimeofday(&current_time);
1612
sleep_schedule(&current_time, timeout);
1614
pthread_resched_resume(PS_FDW_WAIT);
1617
pthread_sched_prevent();
1618
if (sleep_cancel(pthread_run) == NOTOK) {
1619
CLEAR_PF_DONE_EVENT(pthread_run);
1620
pthread_sched_resume();
1621
- SET_ERRNO(ETIMEDOUT);
1625
pthread_sched_resume();
1627
pthread_resched_resume(PS_FDW_WAIT);
1629
CLEAR_PF_DONE_EVENT(pthread_run);
1637
machdep_gettimeofday(&current_time);
1638
sleep_schedule(&current_time, timeout);
1640
+ SET_PF_AT_CANCEL_POINT(pthread_run);
1641
pthread_resched_resume(PS_FDW_WAIT);
1642
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
1645
pthread_sched_prevent();
1646
if (sleep_cancel(pthread_run) == NOTOK) {
1647
CLEAR_PF_DONE_EVENT(pthread_run);
1648
pthread_sched_resume();
1652
pthread_sched_resume();
1654
+ SET_PF_AT_CANCEL_POINT(pthread_run);
1655
pthread_resched_resume(PS_FDW_WAIT);
1656
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
1658
CLEAR_PF_DONE_EVENT(pthread_run);
1665
if (sleep_cancel(pthread_run) == NOTOK) {
1666
CLEAR_PF_DONE_EVENT(pthread_run);
1667
pthread_sched_resume();
1668
- SET_ERRNO(ETIMEDOUT);
1676
CLEAR_PF_DONE_EVENT(pthread_run);
1687
int create(const char *path, mode_t mode)
1689
! return creat (path, mode);
1692
/* ==========================================================================
1695
int create(const char *path, mode_t mode)
1697
! return creat (path, mode);
1700
/* ==========================================================================
1704
int creat(const char *path, mode_t mode)
1706
! return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode);
1709
/* ==========================================================================
1712
int creat(const char *path, mode_t mode)
1714
! return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode);
1717
/* ==========================================================================
1720
int bind(int fd, const struct sockaddr *name, int namelen)
1722
/* Not much to do in bind */
1726
if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1727
if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) {
1730
fd_unlock(fd, FD_RDWR);
1733
int bind(int fd, const struct sockaddr *name, int namelen)
1735
/* Not much to do in bind */
1738
if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1739
if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) {
1743
fd_unlock(fd, FD_RDWR);
1748
int connect(int fd, const struct sockaddr *name, int namelen)
1750
! struct sockaddr tmpname;
1751
! int ret, tmpnamelen;
1753
! if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1754
if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) {
1755
if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
1756
! ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) ||
1757
! (ret == -EALREADY) || (ret == -EAGAIN))) {
1758
pthread_sched_prevent();
1760
/* queue pthread for a FDW_WAIT */
1763
int connect(int fd, const struct sockaddr *name, int namelen)
1765
! struct sockaddr tmpname;
1766
! int ret, tmpnamelen;
1768
! if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1769
if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) {
1770
if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
1771
! ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) ||
1772
! (ret == -EALREADY) || (ret == -EAGAIN))) {
1773
pthread_sched_prevent();
1775
/* queue pthread for a FDW_WAIT */
1778
tmpnamelen = sizeof(tmpname);
1779
/* OK now let's see if it really worked */
1780
if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
1781
! &tmpname, &tmpnamelen)) < OK) && (ret == -ENOTCONN)) {
1783
/* Get the error, this function should not fail */
1784
machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET,
1785
! SO_ERROR, &pthread_run->error, &tmpnamelen);
1790
tmpnamelen = sizeof(tmpname);
1791
/* OK now let's see if it really worked */
1792
if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
1793
! &tmpname, &tmpnamelen)) < OK)
1794
! && (ret == -ENOTCONN)) {
1796
/* Get the error, this function should not fail */
1797
machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET,
1798
! SO_ERROR, &ret, &tmpnamelen);
1813
fd_unlock(fd, FD_RDWR);
1822
fd_unlock(fd, FD_RDWR);
1831
fd_unlock(fd, FD_RDWR);
1832
SET_ERRNO(-fd_kern);
1836
fd_unlock(fd, FD_RDWR);
1839
fd_unlock(fd, FD_RDWR);
1840
SET_ERRNO(-fd_kern);
1844
fd_unlock(fd, FD_RDWR);
1849
if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1850
! ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog);
1851
! if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) {
1858
if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
1859
! if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) {
1865
CLEAR_PF_DONE_EVENT(pthread_run);
1866
pthread_sched_resume();
1867
SET_ERRNO(ETIMEDOUT);
1871
pthread_sched_resume();
1873
CLEAR_PF_DONE_EVENT(pthread_run);
1874
pthread_sched_resume();
1875
SET_ERRNO(ETIMEDOUT);
1879
pthread_sched_resume();
1882
CLEAR_PF_DONE_EVENT(pthread_run);
1883
pthread_sched_resume();
1884
SET_ERRNO(ETIMEDOUT);
1888
pthread_sched_resume();
1890
CLEAR_PF_DONE_EVENT(pthread_run);
1891
pthread_sched_resume();
1892
SET_ERRNO(ETIMEDOUT);
1896
pthread_sched_resume();
1899
CLEAR_PF_DONE_EVENT(pthread_run);
1900
pthread_sched_resume();
1901
SET_ERRNO(ETIMEDOUT);
1905
pthread_sched_resume();
1907
CLEAR_PF_DONE_EVENT(pthread_run);
1908
pthread_sched_resume();
1909
SET_ERRNO(ETIMEDOUT);
1913
pthread_sched_resume();
1916
CLEAR_PF_DONE_EVENT(pthread_run);
1917
pthread_sched_resume();
1918
SET_ERRNO(ETIMEDOUT);
1922
pthread_sched_resume();
1924
CLEAR_PF_DONE_EVENT(pthread_run);
1925
pthread_sched_resume();
1926
SET_ERRNO(ETIMEDOUT);
1930
pthread_sched_resume();
1933
CLEAR_PF_DONE_EVENT(pthread_run);
1934
pthread_sched_resume();
1935
SET_ERRNO(ETIMEDOUT);
1939
pthread_sched_resume();
1941
CLEAR_PF_DONE_EVENT(pthread_run);
1942
pthread_sched_resume();
1943
SET_ERRNO(ETIMEDOUT);
1947
pthread_sched_resume();
1950
CLEAR_PF_DONE_EVENT(pthread_run);
1951
pthread_sched_resume();
1952
SET_ERRNO(ETIMEDOUT);
1956
pthread_sched_resume();
1958
CLEAR_PF_DONE_EVENT(pthread_run);
1959
pthread_sched_resume();
1960
SET_ERRNO(ETIMEDOUT);
1964
pthread_sched_resume();
1968
int getsockopt(int fd, int level, int optname, void * optval, int * optlen)
1972
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
1973
if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level,
1974
! optname, optval, optlen)) < OK) {
1980
int getsockopt(int fd, int level, int optname, void * optval, int * optlen)
1984
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
1985
if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level,
1986
! optname, optval, optlen)) < OK) {
1993
int getsockname(int fd, struct sockaddr * name, int * naddrlen)
1997
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
1998
! if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i,
1999
! name, naddrlen)) < OK) {
2003
! fd_unlock(fd, FD_RDWR);
2011
int getsockname(int fd, struct sockaddr * name, int * naddrlen)
2015
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
2016
! if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i,
2017
! name, naddrlen)) < OK) {
2021
! fd_unlock(fd, FD_RDWR);
2030
int getpeername(int fd, struct sockaddr * peer, int * paddrlen)
2034
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
2035
! if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
2036
! peer, paddrlen)) < OK) {
2040
! fd_unlock(fd, FD_READ);
2047
int getpeername(int fd, struct sockaddr * peer, int * paddrlen)
2051
! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
2052
! if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
2053
! peer, paddrlen)) < OK) {
2057
! fd_unlock(fd, FD_READ);
2062
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread.c,v
retrieving revision 1.1.1.1
2063
diff -c -r1.1.1.1 pthread.c
2064
*** pthread.c 1995/12/13 05:53:01 1.1.1.1
2065
--- pthread.c 1996/10/01 21:42:01
2072
+ /*----------------------------------------------------------------------
2073
+ * Function: __pthread_is_valid
2074
+ * Purpose: Scan the list of threads to see if a specified thread exists
2076
+ * pthread = The thread to scan for
2078
+ * int = 1 if found, 0 if not
2080
+ * The kernel is assumed to be locked
2081
+ *----------------------------------------------------------------------*/
2083
+ __pthread_is_valid( pthread_t pthread )
2085
+ int rtn = 0; /* Assume not found */
2088
+ for( t = pthread_link_list; t; t = t->pll ) {
2089
+ if( t == pthread ) {
2090
+ rtn = 1; /* Found it */
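A stand-alone sketch of the validity scan: walk the global pll link list and report whether the handle still names a live thread (the struct is a stand-in for struct pthread):

struct sketch_thread { struct sketch_thread *pll; };

static int sketch_is_valid(struct sketch_thread *link_list,
                           struct sketch_thread *target)
{
    struct sketch_thread *t;

    for (t = link_list; t; t = t->pll)   /* pll threads every thread together */
        if (t == target)
            return 1;                    /* found it */
    return 0;                            /* not on the global list */
}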
2098
/* ==========================================================================
2104
new_thread->next = NULL;
2105
new_thread->flags = 0;
2107
+ /* PTHREADS spec says we start with cancellability on and deferred */
2108
+ SET_PF_CANCEL_STATE(new_thread, PTHREAD_CANCEL_ENABLE);
2109
+ SET_PF_CANCEL_TYPE(new_thread, PTHREAD_CANCEL_DEFERRED);
2111
new_thread->error_p = NULL;
2112
new_thread->sll = NULL;
2120
- /* ==========================================================================
2121
- * pthread_cancel()
2123
- * This routine will also require a sig_prevent/sig_check_and_resume()
2126
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread_init.c,v
retrieving revision 1.1.1.1
2127
diff -c -r1.1.1.1 pthread_init.c
2128
*** pthread_init.c 1996/03/13 04:33:10 1.1.1.1
2129
--- pthread_init.c 1996/10/01 21:43:59
2132
pthread_initial->next = NULL;
2133
pthread_initial->flags = 0;
2134
pthread_initial->pll = NULL;
2135
- pthread_initial->flags = 0;
2136
pthread_initial->sll = NULL;
2138
/* Ugly errno hack */
2139
pthread_initial->error_p = &errno;
2141
pthread_initial->next = NULL;
2142
pthread_initial->flags = 0;
2143
pthread_initial->pll = NULL;
2144
pthread_initial->sll = NULL;
2146
+ /* PTHREADS spec says we start with cancellability on and deferred */
2147
+ SET_PF_CANCEL_STATE(pthread_initial, PTHREAD_CANCEL_ENABLE);
2148
+ SET_PF_CANCEL_TYPE(pthread_initial, PTHREAD_CANCEL_DEFERRED);
2151
/* Ugly errno hack */
2152
pthread_initial->error_p = &errno;
2153
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread_join.c,v
retrieving revision 1.1.1.1
2154
diff -c -r1.1.1.1 pthread_join.c
2155
*** pthread_join.c 1995/12/13 05:53:07 1.1.1.1
2156
--- pthread_join.c 1996/10/02 16:54:36
2160
#include <pthread.h>
2163
+ static int testDeadlock( struct pthread_queue *queue, pthread_t target );
2165
/* ==========================================================================
2172
pthread_sched_prevent();
2174
+ /* Ensure they gave us a legal pthread pointer */
2175
+ if( ! __pthread_is_valid( pthread ) ) {
2176
+ pthread_sched_resume();
2180
/* Check that thread isn't detached already */
2181
if (pthread->attr.flags & PTHREAD_DETACHED) {
2182
pthread_sched_resume();
2185
* Note: This must happen after checking detached state.
2187
if (pthread_queue_remove(&pthread_dead_queue, pthread) != OK) {
2188
! pthread_queue_enq(&(pthread->join_queue), pthread_run);
2189
! pthread_resched_resume(PS_JOIN);
2190
! pthread_sched_prevent();
2192
! if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) {
2193
! pthread_queue_enq(&pthread_alloc_queue, pthread);
2194
! pthread->attr.flags |= PTHREAD_DETACHED;
2195
! pthread->state = PS_UNALLOCED;
2196
! if (thread_return) {
2197
! *thread_return = pthread->ret;
2204
/* Just get the return value and detach the thread */
2206
* Note: This must happen after checking detached state.
2208
if (pthread_queue_remove(&pthread_dead_queue, pthread) != OK) {
2210
! /* Before we pend on the join, ensure there is no deadlock */
2212
! if( testDeadlock( &pthread_run->join_queue, pthread ) == NOTOK ) {
2215
! pthread_queue_enq(&(pthread->join_queue), pthread_run);
2216
! SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
2217
! pthread_resched_resume(PS_JOIN);
2218
! CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
2219
! pthread_sched_prevent();
2221
! if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) {
2222
! pthread_queue_enq(&pthread_alloc_queue, pthread);
2223
! pthread->attr.flags |= PTHREAD_DETACHED;
2224
! pthread->state = PS_UNALLOCED;
2225
! if (thread_return) {
2226
! *thread_return = pthread->ret;
2234
/* Just get the return value and detach the thread */
2239
pthread_sched_resume();
2243
+ /*----------------------------------------------------------------------
2244
+ * Function: testDeadlock
2245
+ * Purpose: recursive queue walk to check for deadlocks
2247
+ * queue = the queue to walk
2248
+ * pthread = target to scan for
2250
+ * OK = no deadlock, NOTOK = deadlock
2252
+ *----------------------------------------------------------------------*/
2254
+ testDeadlock( struct pthread_queue *queue, pthread_t target )
2258
+ if( queue == NULL )
2259
+ return OK; /* Empty queue, obviously ok */
2261
+ for( t = queue->q_next; t; t = t->next ) {
2263
+ return NOTOK; /* bang, you're dead */
2265
+ if( testDeadlock( &t->join_queue, target ) == NOTOK ) {
2270
+ return OK; /* No deadlock */
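testDeadlock() guards pthread_join() against join cycles: before blocking on the target, the caller recursively walks the join queues to see whether the target is already (directly or transitively) waiting on it. A self-contained sketch with stand-in types:

#include <stddef.h>

#define SKETCH_OK      0
#define SKETCH_NOTOK (-1)

struct sketch_jthread;
struct sketch_jqueue  { struct sketch_jthread *q_next; };
struct sketch_jthread { struct sketch_jthread *next; struct sketch_jqueue join_queue; };

static int sketch_test_deadlock(struct sketch_jqueue *queue,
                                struct sketch_jthread *target)
{
    struct sketch_jthread *t;

    if (queue == NULL)
        return SKETCH_OK;                 /* empty queue, nothing waiting */

    for (t = queue->q_next; t; t = t->next) {
        if (t == target)
            return SKETCH_NOTOK;          /* target already waits on us */
        if (sketch_test_deadlock(&t->join_queue, target) == SKETCH_NOTOK)
            return SKETCH_NOTOK;          /* cycle further down the chain */
    }
    return SKETCH_OK;
}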
2272
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/select.c,v
retrieving revision 1.1.1.1
2273
diff -c -r1.1.1.1 select.c
2274
*** select.c 1996/03/05 08:29:14 1.1.1.1
2275
--- select.c 1996/10/02 16:56:27
2278
int select(int numfds, fd_set *readfds, fd_set *writefds,
2279
fd_set *exceptfds, struct timeval *timeout)
2281
! fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */
2282
! fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p;
2283
! fd_set read_locks, write_locks, rdwr_locks;
2284
! struct timespec timeout_time, current_time;
2285
! struct timeval zero_timeout = { 0, 0 };
2286
! int i, j, ret = 0, got_all_locks = 1;
2287
! struct pthread_select_data data;
2289
! if (numfds > dtablesize) {
2290
! numfds = dtablesize;
2294
! FD_ZERO(&data.readfds);
2295
! FD_ZERO(&data.writefds);
2296
! FD_ZERO(&data.exceptfds);
2298
! /* Do this first */
2300
machdep_gettimeofday(&current_time);
2301
! timeout_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
2302
! if ((timeout_time.tv_nsec = current_time.tv_nsec +
2303
! (timeout->tv_usec * 1000)) > 1000000000) {
2304
! timeout_time.tv_nsec -= 1000000000;
2305
! timeout_time.tv_sec++;
2309
! FD_ZERO(&read_locks);
2310
! FD_ZERO(&write_locks);
2311
! FD_ZERO(&rdwr_locks);
2312
! FD_ZERO(&real_readfds);
2313
! FD_ZERO(&real_writefds);
2314
! FD_ZERO(&real_exceptfds);
2316
! /* lock readfds */
2317
! if (readfds || writefds || exceptfds) {
2318
! for (i = 0; i < numfds; i++) {
2319
! if ((readfds && (FD_ISSET(i, readfds))) ||
2320
! (exceptfds && FD_ISSET(i, exceptfds))) {
2321
! if (writefds && FD_ISSET(i ,writefds)) {
2322
! if ((ret = fd_lock(i, FD_RDWR, NULL)) != OK) {
2323
! got_all_locks = 0;
2326
! FD_SET(i, &rdwr_locks);
2327
! FD_SET(fd_table[i]->fd.i,&real_writefds);
2329
! if ((ret = fd_lock(i, FD_READ, NULL)) != OK) {
2330
! got_all_locks = 0;
2333
! FD_SET(i, &read_locks);
2335
! if (readfds && FD_ISSET(i,readfds)) {
2336
! FD_SET(fd_table[i]->fd.i, &real_readfds);
2338
! if (exceptfds && FD_ISSET(i,exceptfds)) {
2339
! FD_SET(fd_table[i]->fd.i, &real_exceptfds);
2341
! if (fd_table[i]->fd.i >= data.nfds) {
2342
! data.nfds = fd_table[i]->fd.i + 1;
2345
! if (writefds && FD_ISSET(i, writefds)) {
2346
! if ((ret = fd_lock(i, FD_WRITE, NULL)) != OK) {
2347
! got_all_locks = 0;
2350
! FD_SET(i, &write_locks);
2351
! FD_SET(fd_table[i]->fd.i,&real_writefds);
2353
! if (fd_table[i]->fd.i >= data.nfds) {
2354
! data.nfds = fd_table[i]->fd.i + 1;
2360
! if (got_all_locks) {
2362
! memcpy(&data.readfds,&real_readfds,sizeof(fd_set));
2363
! memcpy(&data.writefds,&real_writefds,sizeof(fd_set));
2364
! memcpy(&data.exceptfds,&real_exceptfds,sizeof(fd_set));
2366
! real_readfds_p = (readfds == NULL) ? NULL : &real_readfds;
2367
! real_writefds_p = (writefds == NULL) ? NULL : &real_writefds;
2368
! real_exceptfds_p = (exceptfds == NULL) ? NULL : &real_exceptfds;
2370
! if ((ret = machdep_sys_select(data.nfds, real_readfds_p,
2371
! real_writefds_p, real_exceptfds_p, &zero_timeout)) == OK) {
2373
! pthread_sched_prevent();
2375
! real_exceptfds_p = (exceptfds == NULL) ? NULL : &data.exceptfds;
2376
! real_writefds_p = (writefds == NULL) ? NULL : &data.writefds;
2377
! real_readfds_p = (readfds == NULL) ? NULL : &data.readfds;
2379
! pthread_queue_enq(&fd_wait_select, pthread_run);
2380
! pthread_run->data.select_data = &data;
2381
! SET_PF_WAIT_EVENT(pthread_run);
2384
! machdep_gettimeofday(&current_time);
2385
! sleep_schedule(&current_time, &timeout_time);
2387
! pthread_resched_resume(PS_SELECT_WAIT);
2390
! CLEAR_PF_DONE_EVENT(pthread_run);
2391
! if (sleep_cancel(pthread_run) == NOTOK) {
2397
! pthread_resched_resume(PS_SELECT_WAIT);
2398
! CLEAR_PF_DONE_EVENT(pthread_run);
2399
! ret = data.nfds; /* XXX ??? snl */
2401
! } else if (ret < 0) {
2407
! /* clean up the locks */
2408
! for (i = 0; i < numfds; i++)
2409
! if (FD_ISSET(i,&read_locks)) fd_unlock(i,FD_READ);
2410
! for (i = 0; i < numfds; i++)
2411
! if (FD_ISSET(i,&rdwr_locks)) fd_unlock(i,FD_RDWR);
2412
! for (i = 0; i < numfds; i++)
2413
! if (FD_ISSET(i,&write_locks)) fd_unlock(i,FD_WRITE);
2416
! if (readfds != NULL) {
2417
! for (i = 0; i < numfds; i++) {
2418
! if (! (FD_ISSET(i,readfds) &&
2419
! FD_ISSET(fd_table[i]->fd.i,real_readfds_p)))
2420
! FD_CLR(i,readfds);
2423
! if (writefds != NULL) {
2424
! for (i = 0; i < numfds; i++)
2425
! if (! (FD_ISSET(i,writefds) &&
2426
! FD_ISSET(fd_table[i]->fd.i,real_writefds_p)))
2427
! FD_CLR(i,writefds);
2429
! if (exceptfds != NULL) {
2430
! for (i = 0; i < numfds; i++)
2431
! if (! (FD_ISSET(i,exceptfds) &&
2432
! FD_ISSET(fd_table[i]->fd.i,real_exceptfds_p)))
2433
! FD_CLR(i,exceptfds);
2436
! if (exceptfds != NULL) FD_ZERO(exceptfds);
2437
! if (writefds != NULL) FD_ZERO(writefds);
2438
! if (readfds != NULL) FD_ZERO(readfds);
2444
int select(int numfds, fd_set *readfds, fd_set *writefds,
2445
fd_set *exceptfds, struct timeval *timeout)
2447
! fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */
2448
! fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p;
2449
! fd_set read_locks, write_locks, rdwr_locks;
2450
! struct timespec timeout_time, current_time;
2451
! struct timeval zero_timeout = { 0, 0 };
2452
! int i, j, ret = 0, got_all_locks = 1;
2453
! struct pthread_select_data data;
2455
! if (numfds > dtablesize) {
2456
! numfds = dtablesize;
2460
! FD_ZERO(&data.readfds);
2461
! FD_ZERO(&data.writefds);
2462
! FD_ZERO(&data.exceptfds);
2464
! /* Do this first */
2466
! machdep_gettimeofday(&current_time);
2467
! timeout_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
2468
! if ((timeout_time.tv_nsec = current_time.tv_nsec +
2469
! (timeout->tv_usec * 1000)) > 1000000000) {
2470
! timeout_time.tv_nsec -= 1000000000;
2471
! timeout_time.tv_sec++;
2475
! FD_ZERO(&read_locks);
2476
! FD_ZERO(&write_locks);
2477
! FD_ZERO(&rdwr_locks);
2478
! FD_ZERO(&real_readfds);
2479
! FD_ZERO(&real_writefds);
2480
! FD_ZERO(&real_exceptfds);
2482
! /* lock readfds */
2483
! if (readfds || writefds || exceptfds) {
2484
! for (i = 0; i < numfds; i++) {
2485
! if ((readfds && (FD_ISSET(i, readfds))) ||
2486
! (exceptfds && FD_ISSET(i, exceptfds))) {
2487
! if (writefds && FD_ISSET(i ,writefds)) {
2488
! if ((ret = fd_lock(i, FD_RDWR, NULL)) != OK) {
2489
! got_all_locks = 0;
2492
! FD_SET(i, &rdwr_locks);
2493
! FD_SET(fd_table[i]->fd.i,&real_writefds);
2495
! if ((ret = fd_lock(i, FD_READ, NULL)) != OK) {
2496
! got_all_locks = 0;
2499
! FD_SET(i, &read_locks);
2501
! if (readfds && FD_ISSET(i,readfds)) {
2502
! FD_SET(fd_table[i]->fd.i, &real_readfds);
2504
! if (exceptfds && FD_ISSET(i,exceptfds)) {
2505
! FD_SET(fd_table[i]->fd.i, &real_exceptfds);
2507
! if (fd_table[i]->fd.i >= data.nfds) {
2508
! data.nfds = fd_table[i]->fd.i + 1;
2511
! if (writefds && FD_ISSET(i, writefds)) {
2512
! if ((ret = fd_lock(i, FD_WRITE, NULL)) != OK) {
2513
! got_all_locks = 0;
2516
! FD_SET(i, &write_locks);
2517
! FD_SET(fd_table[i]->fd.i,&real_writefds);
2518
! if (fd_table[i]->fd.i >= data.nfds) {
2519
! data.nfds = fd_table[i]->fd.i + 1;
2526
! if (got_all_locks) {
2527
! memcpy(&data.readfds,&real_readfds,sizeof(fd_set));
2528
! memcpy(&data.writefds,&real_writefds,sizeof(fd_set));
2529
! memcpy(&data.exceptfds,&real_exceptfds,sizeof(fd_set));
2531
! real_readfds_p = (readfds == NULL) ? NULL : &real_readfds;
2532
! real_writefds_p = (writefds == NULL) ? NULL : &real_writefds;
2533
! real_exceptfds_p = (exceptfds == NULL) ? NULL : &real_exceptfds;
2535
! if ((ret = machdep_sys_select(data.nfds, real_readfds_p,
2536
! real_writefds_p, real_exceptfds_p,
2537
! &zero_timeout)) == OK) {
2538
! pthread_sched_prevent();
2540
! real_exceptfds_p = (exceptfds == NULL) ? NULL : &data.exceptfds;
2541
! real_writefds_p = (writefds == NULL) ? NULL : &data.writefds;
2542
! real_readfds_p = (readfds == NULL) ? NULL : &data.readfds;
2544
! pthread_queue_enq(&fd_wait_select, pthread_run);
2545
! pthread_run->data.select_data = &data;
2546
! SET_PF_WAIT_EVENT(pthread_run);
2549
machdep_gettimeofday(¤t_time);
2550
! sleep_schedule(¤t_time, &timeout_time);
2552
! SET_PF_AT_CANCEL_POINT(pthread_run);
! pthread_resched_resume(PS_SELECT_WAIT);
! CLEAR_PF_AT_CANCEL_POINT(pthread_run);
! CLEAR_PF_DONE_EVENT(pthread_run);
! if (sleep_cancel(pthread_run) == NOTOK) {
! SET_PF_AT_CANCEL_POINT(pthread_run);
! pthread_resched_resume(PS_SELECT_WAIT);
! CLEAR_PF_AT_CANCEL_POINT(pthread_run);
! CLEAR_PF_DONE_EVENT(pthread_run);
! ret = data.nfds; /* XXX ??? snl */
! } else if (ret < 0) {
! /* clean up the locks */
! for (i = 0; i < numfds; i++)
! if (FD_ISSET(i,&read_locks)) fd_unlock(i,FD_READ);
! for (i = 0; i < numfds; i++)
! if (FD_ISSET(i,&rdwr_locks)) fd_unlock(i,FD_RDWR);
! for (i = 0; i < numfds; i++)
! if (FD_ISSET(i,&write_locks)) fd_unlock(i,FD_WRITE);
! if (readfds != NULL) {
! for (i = 0; i < numfds; i++) {
! if (! (FD_ISSET(i,readfds) &&
! FD_ISSET(fd_table[i]->fd.i,real_readfds_p)))
! FD_CLR(i,readfds);
! if (writefds != NULL) {
! for (i = 0; i < numfds; i++)
! if (! (FD_ISSET(i,writefds) &&
! FD_ISSET(fd_table[i]->fd.i,real_writefds_p)))
! FD_CLR(i,writefds);
! if (exceptfds != NULL) {
! for (i = 0; i < numfds; i++)
! if (! (FD_ISSET(i,exceptfds) &&
! FD_ISSET(fd_table[i]->fd.i,real_exceptfds_p)))
! FD_CLR(i,exceptfds);
+ if (exceptfds != NULL) FD_ZERO(exceptfds);
+ if (writefds != NULL) FD_ZERO(writefds);
+ if (readfds != NULL) FD_ZERO(readfds);
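For reference, the conversion the reworked select() performs above, from the caller's relative struct timeval timeout to an absolute struct timespec wakeup time, can be written stand-alone as below (a minimal sketch; abs_wakeup is an illustrative name, not part of the package, and it assumes the current time is already available as a struct timespec, as it is after machdep_gettimeofday() in the hunk). Note the hunk tests the nanosecond sum with '>', so a sum of exactly 1000000000 would not be carried; the sketch uses '>='.

    #include <time.h>       /* struct timespec */
    #include <sys/time.h>   /* struct timeval  */

    /* Turn "now + relative timeout" into an absolute wakeup time. */
    static void abs_wakeup(const struct timespec *now,
                           const struct timeval *timeout,
                           struct timespec *wakeup)
    {
        wakeup->tv_sec  = now->tv_sec + timeout->tv_sec;
        wakeup->tv_nsec = now->tv_nsec + timeout->tv_usec * 1000;
        if (wakeup->tv_nsec >= 1000000000L) {   /* carry into seconds */
            wakeup->tv_nsec -= 1000000000L;
            wakeup->tv_sec++;
        }
    }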
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/sig.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 sig.c
*** sig.c 1996/03/13 04:33:13 1.1.1.1
--- sig.c 1996/10/03 01:07:54
pthread_run->data.sigwait = set;
pthread_run->ret = sig;
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
pthread_resched_resume(PS_SIGWAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
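The sigwait change above shows the bracketing pattern the patch repeats at each blocking call (sleep, wait, and the select path earlier): the reschedule is wrapped in SET_PF_AT_CANCEL_POINT / CLEAR_PF_AT_CANCEL_POINT so a pending cancel is only acted on while the thread is parked at a defined cancel point. A schematic of the pattern follows; the helper name is illustrative (the patch open-codes this at every call site), and it assumes the package's internal headers for pthread_run, the PF_ macros, and pthread_resched_resume().

    /* Sketch: turn a blocking reschedule into a cancellation point. */
    static void blocking_wait_as_cancel_point(enum pthread_state state)
    {
        SET_PF_AT_CANCEL_POINT(pthread_run);    /* cancel may be acted on here */
        pthread_resched_resume(state);          /* block until woken            */
        CLEAR_PF_AT_CANCEL_POINT(pthread_run);  /* back to deferred behaviour   */
    }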
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/signal.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 signal.c
*** signal.c 1996/03/13 04:33:17 1.1.1.1
--- signal.c 1996/10/03 17:30:16
static void sig_handler(int signal);
static void set_thread_timer();
+ static void __cleanup_after_resume( void );
void sig_prevent(void);
void sig_resume(void);
! /* Only bother if we are truly unlocking the kernel */
! while (!(--pthread_kernel_lock)) {
! if (sig_to_process) {
! /* if (SIG_ANY(sig_to_process)) { */
! pthread_kernel_lock++;
! if (pthread_run && pthread_run->sigcount) {
! pthread_kernel_lock++;
! pthread_sig_process();
/* ==========================================================================
! __cleanup_after_resume();
/* ==========================================================================
void pthread_resched_resume(enum pthread_state state)
pthread_run->state = state;
- sig_handler(SIGVTALRM);
! /* Only bother if we are truly unlocking the kernel */
! while (!(--pthread_kernel_lock)) {
! if (sig_to_process) {
! /* if (SIG_ANY(sig_to_process)) { */
! pthread_kernel_lock++;
! if (pthread_run && pthread_run->sigcount) {
! pthread_kernel_lock++;
! pthread_sig_process();
/* ==========================================================================
void pthread_resched_resume(enum pthread_state state)
pthread_run->state = state;
! /* Since we are about to block this thread, let's see if we are
! * at a cancel point and if we've been cancelled.
! * Avoid cancelling dead or unalloced threads.
! if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
! TEST_PTHREAD_IS_CANCELLABLE(pthread_run) &&
! state != PS_DEAD && state != PS_UNALLOCED ) {
! /* Set this flag to avoid recursively calling pthread_exit */
! /* We have to set this flag here because we will unlock the
! * kernel prior to calling pthread_cancel_internal.
! SET_PF_RUNNING_TO_CANCEL(pthread_run);
! pthread_run->old_state = state; /* unlock needs this data */
! pthread_sched_resume(); /* Unlock kernel before cancel */
! pthread_cancel_internal( 1 ); /* free locks and exit */
+ sig_handler(SIGVTALRM);
+ __cleanup_after_resume();
/* ==========================================================================
void pthread_sched_resume()
+ __cleanup_after_resume();
+ /*----------------------------------------------------------------------
+ * Function: __cleanup_after_resume
+ * Purpose: cleanup kernel locks after a resume
+ *----------------------------------------------------------------------*/
+ __cleanup_after_resume( void )
/* Only bother if we are truly unlocking the kernel */
while (!(--pthread_kernel_lock)) {
/* if (SIG_ANY(sig_to_process)) { */
+ if( pthread_run == NULL )
+ return; /* Must be during init processing */
+ /* Test for cancel that should be handled now */
+ if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ TEST_PTHREAD_IS_CANCELLABLE(pthread_run) ) {
+ /* Kernel is already unlocked */
+ pthread_cancel_internal( 1 ); /* free locks and exit */
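With both resume paths now funnelling through __cleanup_after_resume(), and pthread_resched_resume() checking for a pending cancel before it blocks, a thread parked in one of the new cancel points can be cancelled from another thread using nothing but the public API. A minimal usage sketch follows (assuming the draft-POSIX prototypes this package's pthread.h provides; blocker and the 60-second timeout are illustrative, and error checking is omitted):

    #include <pthread.h>
    #include <sys/types.h>
    #include <sys/time.h>
    #include <unistd.h>
    #include <stdio.h>

    /* Park in select(), which this patch turns into a cancel point. */
    static void *blocker(void *arg)
    {
        fd_set readfds;
        struct timeval tv;

        FD_ZERO(&readfds);
        FD_SET(0, &readfds);                 /* wait for input on stdin   */
        tv.tv_sec = 60;                      /* long, finite timeout      */
        tv.tv_usec = 0;
        select(1, &readfds, NULL, NULL, &tv);
        return arg;                          /* not reached if cancelled  */
    }

    int main(void)
    {
        pthread_t tid;
        void *status;

        pthread_create(&tid, NULL, blocker, NULL);
        sleep(1);                            /* let it block in select()            */
        pthread_cancel(tid);                 /* acted on at the select() cancel point */
        pthread_join(tid, &status);          /* status is the cancelled exit value    */
        printf("joined, status = %p\n", status);
        return 0;
    }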
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/sleep.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 sleep.c
*** sleep.c 1996/03/11 08:33:32 1.1.1.1
--- sleep.c 1996/10/03 01:14:58
/* Reschedule thread */
SET_PF_WAIT_EVENT(pthread_run);
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
pthread_resched_resume(PS_SLEEP_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
CLEAR_PF_DONE_EVENT(pthread_run);
/* Return actual time slept */
current_time.tv_sec++;
machdep_start_timer(&(current_time),
! &(pthread_sleep->wakeup_time));
for (pthread_last = pthread_sleep; pthread_last;
current_time.tv_sec++;
machdep_start_timer(&(current_time),
! &(pthread_sleep->wakeup_time));
for (pthread_last = pthread_sleep; pthread_last;
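The machdep_start_timer() calls above re-arm the timer for the head of the sleep queue from the current time and that thread's absolute wakeup time; presumably the machdep layer derives the remaining interval from the two. That arithmetic, the time left until an absolute struct timespec deadline, clamped to zero once the deadline has passed, can be sketched stand-alone as below (timespec_remaining is an illustrative name, not part of the package):

    #include <time.h>   /* struct timespec */

    /* Remaining time from *now until *wakeup; {0,0} if already due. */
    static struct timespec timespec_remaining(const struct timespec *now,
                                              const struct timespec *wakeup)
    {
        struct timespec left = { 0, 0 };

        if (wakeup->tv_sec > now->tv_sec ||
            (wakeup->tv_sec == now->tv_sec && wakeup->tv_nsec > now->tv_nsec)) {
            left.tv_sec  = wakeup->tv_sec - now->tv_sec;
            left.tv_nsec = wakeup->tv_nsec - now->tv_nsec;
            if (left.tv_nsec < 0) {          /* borrow from the seconds field */
                left.tv_nsec += 1000000000L;
                left.tv_sec--;
            }
        }
        return left;
    }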
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/stat.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 stat.c
*** stat.c 1995/09/21 02:36:05 1.1.1.1
--- stat.c 1996/06/04 19:17:33
/* ==========================================================================
+ /* ==========================================================================
+ * Might want to indirect this.
+ int fstatfs(int fd, struct statfs *buf)
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_fstatfs(fd_table[fd]->fd.i, buf)) < OK) {
+ fd_unlock(fd, FD_READ);
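The new fstatfs() follows the wrapper pattern used throughout the library for fd-based system calls: take the descriptor lock, translate the user-level descriptor to the kernel descriptor via fd_table[fd]->fd.i, issue the machdep call, then release the lock. A schematic of the pattern follows; wrapped_syscall and machdep_sys_something are hypothetical placeholders, and the error-to-errno mapping (elided in the hunk above) is only indicated by a comment.

    /* Hypothetical wrapper illustrating the fd_lock / machdep / fd_unlock
     * pattern; machdep_sys_something() is a placeholder, not a real entry
     * point in this package. */
    int wrapped_syscall(int fd)
    {
        int ret;

        if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
            /* fd_table[fd]->fd.i is the underlying kernel descriptor */
            if ((ret = machdep_sys_something(fd_table[fd]->fd.i)) < OK) {
                /* map the negative machdep return into errno here */
            }
            fd_unlock(fd, FD_READ);
        }
        return ret;
    }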
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/wait.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 wait.c
*** wait.c 1995/02/21 08:07:24 1.1.1.1
--- wait.c 1996/10/03 01:20:02
pthread_queue_enq(&wait_queue, pthread_run);
/* reschedule unlocks scheduler */
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
pthread_resched_resume(PS_WAIT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
pthread_sched_prevent();
pthread_queue_enq(&wait_queue, pthread_run);
/* reschedule unlocks scheduler */
+ SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */
pthread_resched_resume(PS_WAIT_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */
pthread_sched_prevent();