diff -c -r1.1.1.1 pthread.h *** pthread.h 1996/03/13 04:30:57 1.1.1.1 --- pthread.h 1996/10/02 17:52:47 *************** *** 35,40 **** --- 35,43 ---- * * 1.00 93/07/20 proven * -Started coding this file. + * + * 93/9/28 streepy - Added support for pthread cancel + * */ #ifndef _PTHREAD_H_ *************** *** 65,70 **** --- 68,82 ---- /* More includes, that need size_t */ #include + /* Constants for use with pthread_setcancelstate and pthread_setcanceltype */ + #define PTHREAD_CANCEL_DISABLE 0 + #define PTHREAD_CANCEL_ENABLE 1 + #define PTHREAD_CANCEL_DEFERRED 0 + #define PTHREAD_CANCEL_ASYNCHRONOUS 1 + + #define PTHREAD_CANCELLED (void *)1 /* Exit status of a cancelled thread */ + + #ifdef PTHREAD_KERNEL #include /* for sigset_t */ *************** *** 78,120 **** PS_STATE_MAX }; - #define PF_WAIT_EVENT 0x01 - #define PF_DONE_EVENT 0x02 - /* Put PANIC inside an expression that evaluates to non-void type, to make it easier to combine it in expressions. */ ! #define DO_PANIC() (PANIC (), 0) ! #define PANICIF(x) ((x) ? DO_PANIC () : 0) ! #define SET_PF_DONE_EVENT(x) \ ! ( !(x->flags & PF_DONE_EVENT) \ ! ? ( (x->flags & PF_WAIT_EVENT) \ ! ? (x->flags = PF_DONE_EVENT, OK) \ ! : DO_PANIC ()) \ : NOTOK ) ! #define SET_PF_WAIT_EVENT(x) \ ! ( PANICIF (x->flags & (PF_WAIT_EVENT | PF_DONE_EVENT)), \ ! (x->flags = PF_WAIT_EVENT), 0) ! ! #define CLEAR_PF_DONE_EVENT(x) \ ! ( PANICIF (!(x->flags & PF_DONE_EVENT)), \ ! x->flags = 0 ) struct pthread_select_data { ! int nfds; ! fd_set readfds; ! fd_set writefds; ! fd_set exceptfds; }; union pthread_wait_data { ! pthread_mutex_t * mutex; ! pthread_cond_t * cond; ! const sigset_t * sigwait; /* Waiting on a signal in sigwait */ struct { ! short fd; /* Used when thread waiting on fd */ ! short branch; /* line number, for debugging */ } fd; struct pthread_select_data * select_data; }; --- 90,185 ---- PS_STATE_MAX }; /* Put PANIC inside an expression that evaluates to non-void type, to make it easier to combine it in expressions. */ ! #define DO_PANIC() (PANIC (), 0) ! #define PANICIF(x) ((x) ? DO_PANIC () : 0) ! ! /* In the thread flag field, we use a series of bit flags. Flags can ! * organized into "groups" of mutually exclusive flags. Other flags ! * are unrelated and can be set and cleared with a single bit operation. ! */ ! #define PF_WAIT_EVENT 0x01 ! #define PF_DONE_EVENT 0x02 ! #define PF_EVENT_GROUP 0x03 /* All event bits */ ! ! #define PF_CANCEL_STATE 0x04 /* cancellability state */ ! #define PF_CANCEL_TYPE 0x08 /* cancellability type */ ! #define PF_THREAD_CANCELLED 0x10 /* thread has been cancelled */ ! #define PF_RUNNING_TO_CANCEL 0x20 /* Thread is running so it can cancel*/ ! #define PF_AT_CANCEL_POINT 0x40 /* Thread is at a cancel point */ ! ! /* Flag operations */ ! ! #define SET_PF_FLAG(x,f) ( (x)->flags |= (f) ) ! #define TEST_PF_FLAG(x,f) ( (x)->flags & (f) ) ! #define CLEAR_PF_FLAG(x,f) ( (x)->flags &= ~(f) ) ! #define CLEAR_PF_GROUP(x,g) ( (x)->flags &= ~(g) ) ! #define SET_PF_FLAG_IN_GROUP(x,g,f) ( CLEAR_PF_GROUP(x,g),SET_PF_FLAG(x,f)) ! #define TEST_PF_GROUP(x,g) ( (x)->flags & (g) ) ! ! #define SET_PF_DONE_EVENT(x) \ ! ( !TEST_PF_FLAG(x,PF_DONE_EVENT) \ ! ? ( TEST_PF_FLAG(x,PF_WAIT_EVENT) \ ! ? (SET_PF_FLAG_IN_GROUP(x,PF_EVENT_GROUP,PF_DONE_EVENT), OK) \ ! : DO_PANIC ()) \ : NOTOK ) ! #define SET_PF_WAIT_EVENT(x) \ ! ( PANICIF (TEST_PF_GROUP(x,PF_EVENT_GROUP) ), \ ! SET_PF_FLAG_IN_GROUP(x,PF_EVENT_GROUP,PF_WAIT_EVENT), 0) ! ! #define CLEAR_PF_DONE_EVENT(x) \ ! ( PANICIF (!TEST_PF_FLAG(x,PF_DONE_EVENT)), \ ! 
CLEAR_PF_GROUP(x,PF_EVENT_GROUP) ) ! ! #define SET_PF_CANCELLED(x) ( SET_PF_FLAG(x,PF_THREAD_CANCELLED) ) ! #define TEST_PF_CANCELLED(x) ( TEST_PF_FLAG(x,PF_THREAD_CANCELLED) ) ! ! #define SET_PF_RUNNING_TO_CANCEL(x) ( SET_PF_FLAG(x,PF_RUNNING_TO_CANCEL) ) ! #define CLEAR_PF_RUNNING_TO_CANCEL(x)( CLEAR_PF_FLAG(x,PF_RUNNING_TO_CANCEL) ) ! #define TEST_PF_RUNNING_TO_CANCEL(x)( TEST_PF_FLAG(x,PF_RUNNING_TO_CANCEL) ) ! ! #define SET_PF_AT_CANCEL_POINT(x) ( SET_PF_FLAG(x,PF_AT_CANCEL_POINT) ) ! #define CLEAR_PF_AT_CANCEL_POINT(x) ( CLEAR_PF_FLAG(x,PF_AT_CANCEL_POINT) ) ! #define TEST_PF_AT_CANCEL_POINT(x) ( TEST_PF_FLAG(x,PF_AT_CANCEL_POINT) ) ! ! #define SET_PF_CANCEL_STATE(x,f) \ ! ( (f) ? SET_PF_FLAG(x,PF_CANCEL_STATE) : CLEAR_PF_FLAG(x,PF_CANCEL_STATE) ) ! #define TEST_PF_CANCEL_STATE(x) \ ! ( (TEST_PF_FLAG(x,PF_CANCEL_STATE)) ? PTHREAD_CANCEL_ENABLE \ ! : PTHREAD_CANCEL_DISABLE ) ! ! #define SET_PF_CANCEL_TYPE(x,f) \ ! ( (f) ? SET_PF_FLAG(x,PF_CANCEL_TYPE) : CLEAR_PF_FLAG(x,PF_CANCEL_TYPE) ) ! #define TEST_PF_CANCEL_TYPE(x) \ ! ( (TEST_PF_FLAG(x,PF_CANCEL_TYPE)) ? PTHREAD_CANCEL_ASYNCHRONOUS \ ! : PTHREAD_CANCEL_DEFERRED ) ! ! /* See if a thread is in a state that it can be cancelled */ ! #define TEST_PTHREAD_IS_CANCELLABLE(x) \ ! ( (TEST_PF_CANCEL_STATE(x) == PTHREAD_CANCEL_ENABLE && TEST_PF_CANCELLED(x)) \ ! ? ((TEST_PF_CANCEL_TYPE(x) == PTHREAD_CANCEL_ASYNCHRONOUS) \ ! ? 1 \ ! : TEST_PF_AT_CANCEL_POINT(x)) \ ! : 0 ) ! struct pthread_select_data { ! int nfds; ! fd_set readfds; ! fd_set writefds; ! fd_set exceptfds; }; union pthread_wait_data { ! pthread_mutex_t * mutex; ! pthread_cond_t * cond; ! const sigset_t * sigwait; /* Waiting on a signal in sigwait */ struct { ! short fd; /* Used when thread waiting on fd */ ! short branch; /* line number, for debugging */ } fd; struct pthread_select_data * select_data; }; *************** *** 122,143 **** #define PTT_USER_THREAD 0x0001 struct pthread { ! int thread_type; struct machdep_pthread machdep_data; ! pthread_attr_t attr; /* Signal interface */ ! sigset_t sigmask; ! sigset_t sigpending; ! int sigcount; /* Number of signals pending */ /* Timeout time */ ! struct timespec wakeup_time; /* Join queue for waiting threads */ struct pthread_queue join_queue; - /* * Thread implementations are just multiple queue type implemenations, * Below are the various link lists currently necessary --- 187,207 ---- #define PTT_USER_THREAD 0x0001 struct pthread { ! int thread_type; struct machdep_pthread machdep_data; ! pthread_attr_t attr; /* Signal interface */ ! sigset_t sigmask; ! sigset_t sigpending; ! int sigcount; /* Number of signals pending */ /* Timeout time */ ! struct timespec wakeup_time; /* Join queue for waiting threads */ struct pthread_queue join_queue; /* * Thread implementations are just multiple queue type implemenations, * Below are the various link lists currently necessary *************** *** 152,165 **** * ALL threads, in any state. * Must lock kernel lock before manipulating. */ ! struct pthread * pll; /* * Standard link list for running threads, mutexes, etc ... * It can't be on both a running link list and a wait queue. * Must lock kernel lock before manipulating. */ ! struct pthread * next; union pthread_wait_data data; /* --- 216,229 ---- * ALL threads, in any state. * Must lock kernel lock before manipulating. */ ! struct pthread * pll; /* * Standard link list for running threads, mutexes, etc ... * It can't be on both a running link list and a wait queue. * Must lock kernel lock before manipulating. */ ! 
struct pthread * next; union pthread_wait_data data; /* *************** *** 167,197 **** * (Note: "priority" is a reserved word in Concurrent C, please * don't use it. --KR) */ ! struct pthread_queue * queue; ! enum pthread_state state; ! char flags; ! char pthread_priority; /* * Sleep queue, this is different from the standard link list * because it is possible to be on both (pthread_cond_timedwait(); * Must lock sleep mutex before manipulating */ ! struct pthread *sll; /* For sleeping threads */ /* * Data that doesn't need to be locked ! * Mostly it's because only the thread owning the data can manipulate it */ ! void * ret; ! int error; ! int * error_p; ! const void ** specific_data; ! int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; - }; #else /* not PTHREAD_KERNEL */ --- 231,261 ---- * (Note: "priority" is a reserved word in Concurrent C, please * don't use it. --KR) */ ! struct pthread_queue * queue; ! enum pthread_state state; ! enum pthread_state old_state; /* Used when cancelled */ ! char flags; ! char pthread_priority; /* * Sleep queue, this is different from the standard link list * because it is possible to be on both (pthread_cond_timedwait(); * Must lock sleep mutex before manipulating */ ! struct pthread *sll; /* For sleeping threads */ /* * Data that doesn't need to be locked ! * Mostly because only the thread owning the data can manipulate it */ ! void * ret; ! int error; ! int * error_p; ! const void ** specific_data; ! int specific_data_count; /* Cleanup handlers Link List */ struct pthread_cleanup *cleanup; }; #else /* not PTHREAD_KERNEL */ *************** *** 200,223 **** #endif ! typedef struct pthread * pthread_t; /* * Globals */ #ifdef PTHREAD_KERNEL ! extern struct pthread * pthread_run; ! extern struct pthread * pthread_initial; ! extern struct pthread * pthread_link_list; extern struct pthread_queue pthread_dead_queue; extern struct pthread_queue pthread_alloc_queue; ! extern pthread_attr_t pthread_attr_default; ! extern volatile int fork_lock; ! extern pthread_size_t pthread_pagesize; ! ! extern sigset_t * uthread_sigmask; #endif --- 264,293 ---- #endif ! typedef struct pthread *pthread_t; /* * Globals */ #ifdef PTHREAD_KERNEL ! extern struct pthread * pthread_run; ! extern struct pthread * pthread_initial; ! extern struct pthread * pthread_link_list; extern struct pthread_queue pthread_dead_queue; extern struct pthread_queue pthread_alloc_queue; ! extern pthread_attr_t pthread_attr_default; ! extern volatile int fork_lock; ! extern pthread_size_t pthread_pagesize; ! ! extern sigset_t * uthread_sigmask; ! ! /* Kernel global functions */ ! extern void pthread_sched_prevent(void); ! extern void pthread_sched_resume(void); ! extern int __pthread_is_valid( pthread_t ); ! extern void pthread_cancel_internal( int freelocks ); #endif *************** *** 229,271 **** #if defined(DCE_COMPAT) ! typedef void * (*pthread_startroutine_t)(void *) ! typedef void * pthread_addr_t ! int pthread_create __P((pthread_t *, pthread_attr_t, ! pthread_startroutine_t, ! pthread_addr_t)); ! void pthread_exit __P((pthread_addr_t)); ! int pthread_join __P((pthread_t, pthread_addr_t *)); #else ! void pthread_init __P((void)); ! int pthread_create __P((pthread_t *, ! const pthread_attr_t *, ! void * (*start_routine)(void *), ! void *)); ! void pthread_exit __P((void *)); ! pthread_t pthread_self __P((void)); ! int pthread_equal __P((pthread_t, pthread_t)); ! int pthread_join __P((pthread_t, void **)); ! int pthread_detach __P((pthread_t)); ! 
void pthread_yield __P((void)); ! ! int pthread_setschedparam __P((pthread_t pthread, int policy, ! struct sched_param * param)); ! int pthread_getschedparam __P((pthread_t pthread, int * policy, ! struct sched_param * param)); ! ! int pthread_kill __P((struct pthread *, int)); ! int pthread_signal __P((int, void (*)(int))); #endif #if defined(PTHREAD_KERNEL) /* Not valid, but I can't spell so this will be caught at compile time */ ! #define pthread_yeild(notvalid) #endif --- 299,343 ---- #if defined(DCE_COMPAT) ! typedef void * (*pthread_startroutine_t)(void *); ! typedef void * pthread_addr_t; ! int pthread_create __P((pthread_t *, pthread_attr_t, ! pthread_startroutine_t, pthread_addr_t)); ! void pthread_exit __P((pthread_addr_t)); ! int pthread_join __P((pthread_t, pthread_addr_t *)); #else ! void pthread_init __P((void)); ! int pthread_create __P((pthread_t *, const pthread_attr_t *, ! void * (*start_routine)(void *), void *)); ! void pthread_exit __P((void *)); ! pthread_t pthread_self __P((void)); ! int pthread_equal __P((pthread_t, pthread_t)); ! int pthread_join __P((pthread_t, void **)); ! int pthread_detach __P((pthread_t)); ! void pthread_yield __P((void)); ! ! int pthread_setschedparam __P((pthread_t pthread, int policy, ! struct sched_param * param)); ! int pthread_getschedparam __P((pthread_t pthread, int * policy, ! struct sched_param * param)); ! ! int pthread_kill __P((struct pthread *, int)); ! int pthread_signal __P((int, void (*)(int))); ! ! int pthread_cancel __P(( pthread_t pthread )); ! int pthread_setcancelstate __P(( int state, int *oldstate )); ! int pthread_setcanceltype __P(( int type, int *oldtype )); ! void pthread_testcancel __P(( void )); #endif #if defined(PTHREAD_KERNEL) /* Not valid, but I can't spell so this will be caught at compile time */ ! #define pthread_yeild(notvalid) #endif =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/include/signal.h,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 signal.h *** signal.h 1995/12/25 03:03:09 1.1.1.1 --- signal.h 1996/09/26 21:46:04 *************** *** 43,48 **** --- 43,49 ---- __BEGIN_DECLS int raise __P((int)); + __sighandler_t signal __P((int __sig, __sighandler_t)); #ifndef _ANSI_SOURCE =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/include/pthread/kernel.h,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 kernel.h *** kernel.h 1994/12/13 07:09:01 1.1.1.1 --- kernel.h 1996/10/02 19:08:41 *************** *** 42,48 **** */ #if defined(PTHREAD_KERNEL) ! #define PANIC() abort() /* Time each rr thread gets */ #define PTHREAD_RR_TIMEOUT 100000000 --- 42,54 ---- */ #if defined(PTHREAD_KERNEL) ! #ifdef __GNUC__ ! #include ! #define PANIC() panic_kernel( __FILE__, __LINE__, __ASSERT_FUNCTION ) ! #else ! #define PANIC() panic_kernel( __FILE__, __LINE__, (const char *)0 ) ! #endif ! /* Time each rr thread gets */ #define PTHREAD_RR_TIMEOUT 100000000 =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/syscall-i386-linux-1.0.S,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 syscall-i386-linux-1.0.S *** syscall-i386-linux-1.0.S 1995/09/27 04:38:55 1.1.1.1 --- syscall-i386-linux-1.0.S 1996/06/04 19:20:17 *************** *** 147,154 **** /* ========================================================================= * exit 1 select 82 ! * fork 2 socketcall 102 ! 
* read 3 * write 4 * open 5 * creat 8 --- 147,154 ---- /* ========================================================================= * exit 1 select 82 ! * fork 2 fstatfs 100 ! * read 3 socketcall 102 * write 4 * open 5 * creat 8 *************** *** 160,166 **** * chown 16 * lseek 19 * rename 38 ! * dup 41 * pipe 42 * ioctl 54 * fcntl 55 --- 160,166 ---- * chown 16 * lseek 19 * rename 38 ! * dup 41 * pipe 42 * ioctl 54 * fcntl 55 *************** *** 302,314 **** #endif /* ========================================================================== ! * machdep_sys_fstat() */ #ifdef __ELF__ STATCALL2(lstat) #else SYSCALL2(lstat) #endif /* ========================================================================== * machdep_sys_ftruncate() --- 302,320 ---- #endif /* ========================================================================== ! * machdep_sys_lstat() */ #ifdef __ELF__ STATCALL2(lstat) #else SYSCALL2(lstat) #endif + + /* ========================================================================== + * machdep_sys_fstatfs() + */ + SYSCALL2(fstatfs) + /* ========================================================================== * machdep_sys_ftruncate() =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/linux-1.0/socket.h,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 socket.h *** socket.h 1995/12/26 02:28:03 1.1.1.1 --- socket.h 1996/09/27 18:12:45 *************** *** 26,32 **** #endif ! /* #include /* arch-dependent defines */ #include /* the SIOCxxx I/O controls */ #include --- 26,32 ---- #endif ! /* #include arch-dependent defines */ #include /* the SIOCxxx I/O controls */ #include *************** *** 161,166 **** --- 161,188 ---- int connect __P((int, const struct sockaddr *, int)); int listen __P((int, int)); int socket __P((int, int, int)); + + int getsockopt __P ((int __s, int __level, int __optname, + void *__optval, int *__optlen)); + int setsockopt __P ((int __s, int __level, int __optname, + __const void *__optval, int optlen)); + int getsockname __P ((int __sockfd, struct sockaddr *__addr, + int *__paddrlen)); + int getpeername __P ((int __sockfd, struct sockaddr *__peer, + int *__paddrlen)); + ssize_t send __P ((int __sockfd, __const void *__buff, size_t __len, int __flags)); + ssize_t recv __P ((int __sockfd, void *__buff, size_t __len, int __flags)); + ssize_t sendto __P ((int __sockfd, __const void *__buff, size_t __len, + int __flags, __const struct sockaddr *__to, + int __tolen)); + ssize_t recvfrom __P ((int __sockfd, void *__buff, size_t __len, + int __flags, struct sockaddr *__from, + int *__fromlen)); + extern ssize_t sendmsg __P ((int __fd, __const struct msghdr *__message, + int __flags)); + extern ssize_t recvmsg __P ((int __fd, struct msghdr *__message, + int __flags)); + int shutdown __P ((int __sockfd, int __how)); __END_DECLS =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/machdep/linux-1.0/timers.h,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 timers.h *** timers.h 1996/03/05 08:28:36 1.1.1.1 --- timers.h 1996/05/25 21:30:08 *************** *** 43,52 **** --- 43,54 ---- #include #include + #ifndef _LINUX_TIME_H struct timespec { time_t tv_sec; long tv_nsec; }; + #endif /* _LINUX_TIME_H */ #define TIMEVAL_TO_TIMESPEC(tv, ts) { \ (ts)->tv_sec = (tv)->tv_sec; \ =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/getprotoent.c,v retrieving revision 1.1.1.1 diff -c 
-r1.1.1.1 getprotoent.c *** getprotoent.c 1996/02/09 05:39:41 1.1.1.1 --- getprotoent.c 1996/05/27 01:11:27 *************** *** 128,135 **** if (p != NULL) *p++ = '\0'; } ! if (p && *p); ! break; } *alias = NULL; pthread_mutex_unlock(&proto_file_lock); --- 128,135 ---- if (p != NULL) *p++ = '\0'; } ! if (p && *p) ! break; } *alias = NULL; pthread_mutex_unlock(&proto_file_lock); =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/proto_internal.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 proto_internal.c *** proto_internal.c 1996/02/09 05:39:49 1.1.1.1 --- proto_internal.c 1996/06/04 16:25:57 *************** *** 49,55 **** static int init_status; /* Performs global initialization. */ ! char *_proto_init() { char *buf; --- 49,55 ---- static int init_status; /* Performs global initialization. */ ! char *_proto_buf() { char *buf; *************** *** 75,78 **** { init_status = pthread_key_create(&key, free); } - --- 75,77 ---- =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/net/res_internal.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 res_internal.c *** res_internal.c 1996/02/09 05:39:53 1.1.1.1 --- res_internal.c 1996/09/25 23:31:11 *************** *** 144,149 **** --- 144,150 ---- break; cp += n; result->h_name = bp; + bp += strlen(bp) + 1; iquery_done = 1; break; } =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/GNUmakefile.inc,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 GNUmakefile.inc *** GNUmakefile.inc 1995/08/30 22:27:04 1.1.1.1 --- GNUmakefile.inc 1996/10/02 19:04:29 *************** *** 8,14 **** syscall.S pthread_join.c pthread_detach.c pthread_once.c sleep.c \ specific.c process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \ ! dump_state.c pthread_kill.c stat.c readv.c writev.c condattr.c $(SRCS) ifeq ($(HAVE_SYSCALL_TEMPLATE),yes) SYSCALL_FILTER_RULE= for s in $(AVAILABLE_SYSCALLS) ; do \ --- 8,15 ---- syscall.S pthread_join.c pthread_detach.c pthread_once.c sleep.c \ specific.c process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \ ! dump_state.c pthread_kill.c stat.c readv.c writev.c condattr.c \ ! pthread_cancel.c panic.c $(SRCS) ifeq ($(HAVE_SYSCALL_TEMPLATE),yes) SYSCALL_FILTER_RULE= for s in $(AVAILABLE_SYSCALLS) ; do \ =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/Makefile.inc,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 Makefile.inc *** Makefile.inc 1995/08/22 22:09:07 1.1.1.1 --- Makefile.inc 1996/10/02 19:04:38 *************** *** 8,14 **** pthread_join.c pthread_detach.c pthread_once.c sleep.c specific.c \ process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \ ! dump_state.c pthread_kill.c condattr.c .if $(HAVE_SYSCALL_TEMPLATE) == yes OBJS+= syscalls.o --- 8,14 ---- pthread_join.c pthread_detach.c pthread_once.c sleep.c specific.c \ process.c wait.c errno.c schedparam.c _exit.c prio_queue.c \ pthread_init.c init.cc sig.c info.c mutexattr.c select.c wrapper.c \ ! 
dump_state.c pthread_kill.c condattr.c pthread_cancel.c panic.c .if $(HAVE_SYSCALL_TEMPLATE) == yes OBJS+= syscalls.o =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/cond.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 cond.c *** cond.c 1996/03/05 08:29:12 1.1.1.1 --- cond.c 1996/10/03 18:19:04 *************** *** 188,197 **** --- 188,204 ---- pthread_queue_enq(&cond->c_queue, pthread_run); pthread_mutex_unlock(mutex); + pthread_run->data.mutex = mutex; + SET_PF_WAIT_EVENT(pthread_run); + SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */ /* Reschedule will unlock pthread_run */ pthread_resched_resume(PS_COND_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */ CLEAR_PF_DONE_EVENT(pthread_run); + + pthread_run->data.mutex = NULL; + rval = pthread_mutex_lock(mutex); return(rval); break; *************** *** 203,212 **** --- 210,226 ---- pthread_mutex_unlock(mutex); mutex->m_data.m_count = 1; + pthread_run->data.mutex = mutex; + SET_PF_WAIT_EVENT(pthread_run); + SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */ /* Reschedule will unlock pthread_run */ pthread_resched_resume(PS_COND_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */ CLEAR_PF_DONE_EVENT(pthread_run); + + pthread_run->data.mutex = NULL; + rval = pthread_mutex_lock(mutex); mutex->m_data.m_count = count; return(rval); *************** *** 258,265 **** --- 272,285 ---- SET_PF_WAIT_EVENT(pthread_run); pthread_mutex_unlock(mutex); + pthread_run->data.mutex = mutex; + + SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */ /* Reschedule will unlock pthread_run */ pthread_resched_resume(PS_COND_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */ + + pthread_run->data.mutex = NULL; /* Remove ourselves from sleep queue. If we fail then we timedout */ if (sleep_cancel(pthread_run) == NOTOK) { *************** *** 285,292 **** --- 305,318 ---- SET_PF_WAIT_EVENT(pthread_run); pthread_mutex_unlock(mutex); + pthread_run->data.mutex = mutex; + + SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */ /* Reschedule will unlock pthread_run */ pthread_resched_resume(PS_COND_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */ + + pthread_run->data.mutex = NULL; /* Remove ourselves from sleep queue. If we fail then we timedout */ if (sleep_cancel(pthread_run) == NOTOK) { =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/fd.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 fd.c *** fd.c 1996/02/09 02:54:19 1.1.1.1 --- fd.c 1996/10/03 01:33:03 *************** *** 48,54 **** --- 48,59 ---- #include #include #include + #include + #if __STDC__ #include + #else + #include + #endif #include #include #include *************** *** 62,67 **** --- 67,74 ---- static const int dtablecount = 4096/sizeof(struct fd_table_entry); int dtablesize; + static int fd_get_pthread_fd_from_kernel_fd( int ); + /* ========================================================================== * Allocate dtablecount entries at once and populate the fd_table. 
* *************** *** 199,204 **** --- 206,244 ---- return(NOTOK); } + /*---------------------------------------------------------------------- + * Function: fd_get_pthread_fd_from_kernel_fd + * Purpose: get the fd_table index of a kernel fd + * Args: fd = kernel fd to convert + * Returns: fd_table index, -1 if not found + * Notes: + *----------------------------------------------------------------------*/ + static int + fd_get_pthread_fd_from_kernel_fd( int kfd ) + { + int j; + + /* This is *SICK*, but unless there is a faster way to + * turn a kernel fd into an fd_table index, this has to do. + */ + for( j=0; j < dtablesize; j++ ) { + if( fd_table[j] && + fd_table[j]->type != FD_NT && + fd_table[j]->type != FD_NIU && + fd_table[j]->fd.i == kfd ) { + return j; + } + } + + /* Not listed byfd, Check for kernel fd == pthread fd */ + if( fd_table[kfd] == NULL || fd_table[kfd]->type == FD_NT ) { + /* Assume that the kernel fd is the same */ + return kfd; + } + + return NOTOK; /* Not found */ + } + /* ========================================================================== * fd_basic_basic_unlock() * *************** *** 288,293 **** --- 328,334 ---- switch (fd_table[fd]->type) { case FD_NIU: /* If not in use return EBADF error */ + SET_ERRNO(EBADF); return(NOTOK); break; case FD_NT: *************** *** 297,302 **** --- 338,344 ---- */ fd_kern_init(fd); if (fd_table[fd]->type == FD_NIU) { + SET_ERRNO(EBADF); return(NOTOK); } break; *************** *** 409,414 **** --- 451,545 ---- return(OK); } + /*---------------------------------------------------------------------- + * Function: fd_unlock_for_cancel + * Purpose: Unlock all fd locks held prior to being cancelled + * Args: void + * Returns: + * OK or NOTOK + * Notes: + * Assumes the kernel is locked on entry + *----------------------------------------------------------------------*/ + int + fd_unlock_for_cancel( void ) + { + int i, fd; + struct pthread_select_data *data; + int rdlk, wrlk, lktype; + int found; + + /* What we do depends on the previous state of the thread */ + switch( pthread_run->old_state ) { + case PS_RUNNING: + case PS_JOIN: + case PS_SLEEP_WAIT: + case PS_WAIT_WAIT: + case PS_SIGWAIT: + case PS_FDLR_WAIT: + case PS_FDLW_WAIT: + case PS_DEAD: + case PS_UNALLOCED: + break; /* Nothing to do */ + + case PS_COND_WAIT: + CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP ); + /* Must reaquire the mutex according to the standard */ + if( pthread_run->data.mutex == NULL ) { + PANIC(); + } + pthread_mutex_lock( pthread_run->data.mutex ); + break; + + case PS_FDR_WAIT: + CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP); + /* Free the lock on the fd being used */ + fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd ); + if( fd == NOTOK ) { + PANIC(); /* Can't find fd */ + } + fd_unlock( fd, FD_READ ); + break; + + case PS_FDW_WAIT: /* Waiting on i/o */ + CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP); + /* Free the lock on the fd being used */ + fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd ); + if( fd == NOTOK ) { + PANIC(); /* Can't find fd */ + } + fd_unlock( fd, FD_WRITE ); + break; + + case PS_SELECT_WAIT: + data = pthread_run->data.select_data; + + CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP); + + for( i = 0; i < data->nfds; i++) { + rdlk =(FD_ISSET(i,&data->readfds) + || FD_ISSET(i,&data->exceptfds)); + wrlk = FD_ISSET(i, &data->writefds); + lktype = rdlk ? (wrlk ? FD_RDWR : FD_READ) : FD_WRITE; + + if( ! 
(rdlk || wrlk) ) + continue; /* No locks, no unlock */ + + if( (fd = fd_get_pthread_fd_from_kernel_fd( i )) == NOTOK ) { + PANIC(); /* Can't find fd */ + } + + fd_unlock( fd, lktype ); + } + break; + + case PS_MUTEX_WAIT: + PANIC(); /* Should never cancel a mutex wait */ + + default: + PANIC(); /* Unknown thread status */ + } + } + /* ========================================================================== * fd_lock() */ *************** *** 476,481 **** --- 607,616 ---- ret = fd_table[fd]->ops->read(fd_table[fd]->fd, fd_table[fd]->flags, buf, nbytes, timeout); fd_unlock(fd, FD_READ); + if( ret < 0 ) { + SET_ERRNO(-ret); + ret = NOTOK; + } } return(ret); } *************** *** 500,505 **** --- 635,644 ---- ret = fd_table[fd]->ops->readv(fd_table[fd]->fd, fd_table[fd]->flags, iov, iovcnt, timeout); fd_unlock(fd, FD_READ); + if( ret < 0 ) { + SET_ERRNO(-ret); + ret = NOTOK; + } } return(ret); } *************** *** 524,529 **** --- 663,672 ---- ret = fd_table[fd]->ops->write(fd_table[fd]->fd, fd_table[fd]->flags, buf, nbytes, timeout); fd_unlock(fd, FD_WRITE); + if( ret < 0 ) { + SET_ERRNO(-ret); + ret = NOTOK; + } } return(ret); } *************** *** 548,553 **** --- 691,700 ---- ret = fd_table[fd]->ops->writev(fd_table[fd]->fd, fd_table[fd]->flags, iov, iovcnt, timeout); fd_unlock(fd, FD_WRITE); + if( ret < 0 ) { + SET_ERRNO(-ret); + ret = NOTOK; + } } return(ret); } *************** *** 599,677 **** union fd_data realfd; int ret, flags; /* Need to lock the newfd by hand */ ! if (fd < dtablesize) { ! pthread_mutex_lock(&fd_table_mutex); ! if (fd_table[fd]) { ! pthread_mutex_unlock(&fd_table_mutex); ! mutex = &(fd_table[fd]->mutex); ! pthread_mutex_lock(mutex); ! /* ! * XXX Gross hack ... because of fork(), any fd closed by the ! * parent should not change the fd of the child, unless it owns it. */ ! switch(fd_table[fd]->type) { ! case FD_NIU: ! pthread_mutex_unlock(mutex); ! ret = -EINVAL; ! break; ! case FD_NT: ! /* ! * If it's not tested then the only valid possibility is it's ! * kernel fd. ! */ ! ret = machdep_sys_close(fd); ! fd_table[fd]->type = FD_NIU; ! pthread_mutex_unlock(mutex); ! break; ! case FD_TEST_FULL_DUPLEX: ! case FD_TEST_HALF_DUPLEX: realfd = fd_table[fd]->fd; flags = fd_table[fd]->flags; if ((entry = fd_free(fd)) == NULL) { ! ret = fd_table[fd]->ops->close(realfd, flags); } else { ! /* There can't be any others waiting for fd. */ pthread_mutex_unlock(&entry->mutex); /* Note: entry->mutex = mutex */ - mutex = &(fd_table[fd]->mutex); } pthread_mutex_unlock(mutex); - break; - default: - ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL); - if (ret == OK) { - realfd = fd_table[fd]->fd; - flags = fd_table[fd]->flags; - pthread_mutex_unlock(mutex); - if ((entry = fd_free(fd)) == NULL) { - ret = fd_table[fd]->ops->close(realfd, flags); - } else { - fd_basic_basic_unlock(entry, FD_RDWR); - pthread_mutex_unlock(&entry->mutex); - /* Note: entry->mutex = mutex */ - } - fd_unlock(fd, FD_RDWR); - } else { - pthread_mutex_unlock(mutex); - } - break; } ! } else { ! /* Don't bother creating a table entry */ ! pthread_mutex_unlock(&fd_table_mutex); ! ret = machdep_sys_close(fd); } ! return(ret); } ! return(-EINVAL); } /* ========================================================================== * fd_basic_dup() * * Might need to do more than just what's below. */ static inline void fd_basic_dup(int fd, int newfd) { --- 746,836 ---- union fd_data realfd; int ret, flags; + if( fd < 0 || fd >= dtablesize ) { + SET_ERRNO(EBADF); + return -1; + } + /* Need to lock the newfd by hand */ ! 
pthread_mutex_lock(&fd_table_mutex); ! if (fd_table[fd]) { ! pthread_mutex_unlock(&fd_table_mutex); ! mutex = &(fd_table[fd]->mutex); ! pthread_mutex_lock(mutex); ! /* ! * XXX Gross hack ... because of fork(), any fd closed by the ! * parent should not change the fd of the child, unless it owns it. ! */ ! switch(fd_table[fd]->type) { ! case FD_NIU: ! pthread_mutex_unlock(mutex); ! ret = -EBADF; ! break; ! case FD_NT: ! /* ! * If it's not tested then the only valid possibility is it's ! * kernel fd. */ ! ret = machdep_sys_close(fd); ! fd_table[fd]->type = FD_NIU; ! pthread_mutex_unlock(mutex); ! break; ! case FD_TEST_FULL_DUPLEX: ! case FD_TEST_HALF_DUPLEX: ! realfd = fd_table[fd]->fd; ! flags = fd_table[fd]->flags; ! if ((entry = fd_free(fd)) == NULL) { ! ret = fd_table[fd]->ops->close(realfd, flags); ! } else { ! /* There can't be any others waiting for fd. */ ! pthread_mutex_unlock(&entry->mutex); ! /* Note: entry->mutex = mutex */ ! mutex = &(fd_table[fd]->mutex); ! } ! pthread_mutex_unlock(mutex); ! break; ! default: ! ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL); ! if (ret == OK) { realfd = fd_table[fd]->fd; flags = fd_table[fd]->flags; + pthread_mutex_unlock(mutex); if ((entry = fd_free(fd)) == NULL) { ! ret = fd_table[fd]->ops->close(realfd, flags); } else { ! fd_basic_basic_unlock(entry, FD_RDWR); pthread_mutex_unlock(&entry->mutex); /* Note: entry->mutex = mutex */ } + fd_unlock(fd, FD_RDWR); + } else { pthread_mutex_unlock(mutex); } ! break; } ! } else { ! /* Don't bother creating a table entry */ ! pthread_mutex_unlock(&fd_table_mutex); ! ret = machdep_sys_close(fd); ! } ! ! if( ret < 0 ) { ! SET_ERRNO(-ret); ! ret = -1; } ! ! return ret; } /* ========================================================================== * fd_basic_dup() * * Might need to do more than just what's below. + * + * This is a MAJOR guess!! I don't know if the mutext unlock is valid + * in the BIG picture. But it seems to be needed to avoid deadlocking + * with ourselves when we try to close the duped file descriptor. */ static inline void fd_basic_dup(int fd, int newfd) { *************** *** 679,684 **** --- 838,845 ---- fd_table[fd]->next = fd_table[newfd]; fd_table[newfd] = fd_table[fd]; fd_table[fd]->count++; + pthread_mutex_unlock(&fd_table[newfd]->next->mutex); + } /* ========================================================================== *************** *** 896,904 **** * ala select()... --SNL */ int ! ioctl(int fd, unsigned long request, caddr_t arg) { int ret; if (fd < 0 || fd >= dtablesize) ret = NOTOK; --- 1057,1071 ---- * ala select()... --SNL */ int ! ioctl(int fd, int request, ...) 
{ int ret; + pthread_va_list ap; + caddr_t arg; + + va_start( ap, request ); /* Get the arg */ + arg = va_arg(ap,caddr_t); + va_end( ap ); if (fd < 0 || fd >= dtablesize) ret = NOTOK; *************** *** 906,911 **** --- 1073,1086 ---- ret = machdep_sys_ioctl(fd, request, arg); else if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { ret = machdep_sys_ioctl(fd_table[fd]->fd.i, request, arg); + if( ret == 0 && request == FIONBIO ) { + /* Properly set NONBLOCK flag */ + int v = *(int *)arg; + if( v ) + fd_table[fd]->flags |= __FD_NONBLOCK; + else + fd_table[fd]->flags &= ~__FD_NONBLOCK; + } fd_unlock(fd, FD_RDWR); } return ret; =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/fd_kern.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 fd_kern.c *** fd_kern.c 1996/02/12 00:58:30 1.1.1.1 --- fd_kern.c 1996/10/03 01:54:15 *************** *** 128,134 **** if ((count = machdep_sys_select(dtablesize, &fd_set_read, ! &fd_set_write, NULL, &__fd_kern_poll_timeout)) < OK) { if (count == -EINTR) { return; } --- 128,134 ---- if ((count = machdep_sys_select(dtablesize, &fd_set_read, ! &fd_set_write, &fd_set_except, &__fd_kern_poll_timeout)) < OK) { if (count == -EINTR) { return; } *************** *** 167,200 **** for (pthread = fd_wait_select.q_next; count && pthread; ) { int found_one = 0; for (i = 0; i < pthread->data.select_data->nfds; i++) { int count_dec = 0; ! if ((FD_ISSET(i, &pthread->data.select_data->exceptfds) && ! ! FD_ISSET(i, &fd_set_except))) { ! FD_CLR(i, &pthread->data.select_data->exceptfds); ! } else { ! count_dec++; } ! if ((FD_ISSET(i, &pthread->data.select_data->writefds) && ! ! FD_ISSET(i, &fd_set_write))) { ! FD_CLR(i, &pthread->data.select_data->writefds); ! } else { ! count_dec++; } ! if ((FD_ISSET(i, &pthread->data.select_data->readfds) && ! ! FD_ISSET(i, &fd_set_read))) { ! FD_CLR(i, &pthread->data.select_data->readfds); ! } else { ! count_dec++; } if (count_dec) { found_one++; count--; } } if (found_one) { deq = pthread; pthread = pthread->next; pthread_queue_remove(&fd_wait_select, deq); --- 167,223 ---- for (pthread = fd_wait_select.q_next; count && pthread; ) { int found_one = 0; + fd_set tmp_readfds, tmp_writefds, tmp_exceptfds; + + memcpy(&tmp_readfds, &pthread->data.select_data->readfds, + sizeof(fd_set)); + memcpy(&tmp_writefds, &pthread->data.select_data->writefds, + sizeof(fd_set)); + memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds, + sizeof(fd_set)); for (i = 0; i < pthread->data.select_data->nfds; i++) { int count_dec = 0; ! if( (FD_ISSET(i, &tmp_exceptfds)) ) { ! if( FD_ISSET(i, &fd_set_except) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_exceptfds); ! } } ! ! if( (FD_ISSET(i, &tmp_writefds)) ) { ! if( FD_ISSET(i, &fd_set_write) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_writefds); ! } } ! ! if( (FD_ISSET(i, &tmp_readfds)) ) { ! if( FD_ISSET(i, &fd_set_read) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_readfds); ! 
} } + if (count_dec) { found_one++; count--; } } + if (found_one) { + /* Update the threads saved select data fd sets */ + memcpy(&pthread->data.select_data->readfds, &tmp_readfds, + sizeof(fd_set)); + memcpy(&pthread->data.select_data->writefds, &tmp_writefds, + sizeof(fd_set)); + memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds, + sizeof(fd_set)); + deq = pthread; pthread = pthread->next; pthread_queue_remove(&fd_wait_select, deq); *************** *** 266,272 **** */ while ((count = machdep_sys_select(dtablesize, &fd_set_read, ! &fd_set_write, NULL, &__fd_kern_wait_timeout)) < OK) { if (count == -EINTR) { return; } --- 289,295 ---- */ while ((count = machdep_sys_select(dtablesize, &fd_set_read, ! &fd_set_write, &fd_set_except, &__fd_kern_wait_timeout)) < OK) { if (count == -EINTR) { return; } *************** *** 305,338 **** for (pthread = fd_wait_select.q_next; count && pthread; ) { int found_one = 0; for (i = 0; i < pthread->data.select_data->nfds; i++) { int count_dec = 0; ! if ((FD_ISSET(i, &pthread->data.select_data->exceptfds) && ! ! FD_ISSET(i, &fd_set_except))) { ! FD_CLR(i, &pthread->data.select_data->exceptfds); ! } else { ! count_dec++; } ! if ((FD_ISSET(i, &pthread->data.select_data->writefds) && ! ! FD_ISSET(i, &fd_set_write))) { ! FD_CLR(i, &pthread->data.select_data->writefds); ! } else { ! count_dec++; } ! if ((FD_ISSET(i, &pthread->data.select_data->readfds) && ! ! FD_ISSET(i, &fd_set_read))) { ! FD_CLR(i, &pthread->data.select_data->readfds); ! } else { ! count_dec++; } if (count_dec) { found_one++; count--; } } if (found_one) { deq = pthread; pthread = pthread->next; pthread_queue_remove(&fd_wait_select, deq); --- 328,383 ---- for (pthread = fd_wait_select.q_next; count && pthread; ) { int found_one = 0; + fd_set tmp_readfds, tmp_writefds, tmp_exceptfds; + + memcpy(&tmp_readfds, &pthread->data.select_data->readfds, + sizeof(fd_set)); + memcpy(&tmp_writefds, &pthread->data.select_data->writefds, + sizeof(fd_set)); + memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds, + sizeof(fd_set)); for (i = 0; i < pthread->data.select_data->nfds; i++) { int count_dec = 0; ! if( (FD_ISSET(i, &tmp_exceptfds)) ) { ! if( FD_ISSET(i, &fd_set_except) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_exceptfds); ! } } ! ! if( (FD_ISSET(i, &tmp_writefds)) ) { ! if( FD_ISSET(i, &fd_set_write) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_writefds); ! } } ! ! if( (FD_ISSET(i, &tmp_readfds)) ) { ! if( FD_ISSET(i, &fd_set_read) ) { ! count_dec++; /* got a hit */ ! } else { ! FD_CLR(i, &tmp_readfds); ! 
} } + if (count_dec) { found_one++; count--; } } if (found_one) { + /* Update the threads saved select data fd sets */ + memcpy(&pthread->data.select_data->readfds, &tmp_readfds, + sizeof(fd_set)); + memcpy(&pthread->data.select_data->writefds, &tmp_writefds, + sizeof(fd_set)); + memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds, + sizeof(fd_set)); + deq = pthread; pthread = pthread->next; pthread_queue_remove(&fd_wait_select, deq); *************** *** 380,404 **** machdep_gettimeofday(&current_time); sleep_schedule(&current_time, timeout); pthread_resched_resume(PS_FDR_WAIT); /* We're awake */ pthread_sched_prevent(); if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); - SET_ERRNO(ETIMEDOUT); ret = -ETIMEDOUT; break; } pthread_sched_resume(); } else { pthread_resched_resume(PS_FDR_WAIT); } CLEAR_PF_DONE_EVENT(pthread_run); } else { - SET_ERRNO(-ret); - ret = NOTOK; break; } } --- 425,450 ---- machdep_gettimeofday(&current_time); sleep_schedule(&current_time, timeout); + SET_PF_AT_CANCEL_POINT(pthread_run); pthread_resched_resume(PS_FDR_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* We're awake */ pthread_sched_prevent(); if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); ret = -ETIMEDOUT; break; } pthread_sched_resume(); } else { + SET_PF_AT_CANCEL_POINT(pthread_run); pthread_resched_resume(PS_FDR_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); } CLEAR_PF_DONE_EVENT(pthread_run); } else { break; } } *************** *** 437,443 **** if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); - SET_ERRNO(ETIMEDOUT); ret = -ETIMEDOUT; break; } --- 483,488 ---- *************** *** 447,454 **** } CLEAR_PF_DONE_EVENT(pthread_run); } else { - SET_ERRNO(-ret); - ret = NOTOK; break; } } --- 492,497 ---- *************** *** 480,504 **** machdep_gettimeofday(&current_time); sleep_schedule(&current_time, timeout); pthread_resched_resume(PS_FDW_WAIT); /* We're awake */ pthread_sched_prevent(); if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); - SET_ERRNO(ETIMEDOUT); ret = -ETIMEDOUT; break; } pthread_sched_resume(); } else { pthread_resched_resume(PS_FDW_WAIT); } CLEAR_PF_DONE_EVENT(pthread_run); } else { - SET_ERRNO(-ret); - ret = NOTOK; break; } } --- 523,548 ---- machdep_gettimeofday(&current_time); sleep_schedule(&current_time, timeout); + SET_PF_AT_CANCEL_POINT(pthread_run); pthread_resched_resume(PS_FDW_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* We're awake */ pthread_sched_prevent(); if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); ret = -ETIMEDOUT; break; } pthread_sched_resume(); } else { + SET_PF_AT_CANCEL_POINT(pthread_run); pthread_resched_resume(PS_FDW_WAIT); + CLEAR_PF_AT_CANCEL_POINT(pthread_run); } CLEAR_PF_DONE_EVENT(pthread_run); } else { break; } } *************** *** 537,543 **** if (sleep_cancel(pthread_run) == NOTOK) { CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); - SET_ERRNO(ETIMEDOUT); ret = -ETIMEDOUT; break; } --- 581,586 ---- *************** *** 547,554 **** } CLEAR_PF_DONE_EVENT(pthread_run); } else { - SET_ERRNO(-ret); - ret = NOTOK; break; } } --- 590,595 ---- *************** *** 662,668 **** */ int create(const char *path, mode_t mode) { ! 
return creat (path, mode); } /* ========================================================================== *************** *** 672,678 **** int creat(const char *path, mode_t mode) { ! return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode); } /* ========================================================================== --- 713,719 ---- int creat(const char *path, mode_t mode) { ! return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode); } /* ========================================================================== *************** *** 1079,1090 **** int bind(int fd, const struct sockaddr *name, int namelen) { /* Not much to do in bind */ - semaphore *plock; int ret; if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) { SET_ERRNO(-ret); } fd_unlock(fd, FD_RDWR); } --- 1120,1131 ---- int bind(int fd, const struct sockaddr *name, int namelen) { /* Not much to do in bind */ int ret; if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) { SET_ERRNO(-ret); + ret = NOTOK; } fd_unlock(fd, FD_RDWR); } *************** *** 1100,1113 **** */ int connect(int fd, const struct sockaddr *name, int namelen) { ! struct sockaddr tmpname; ! int ret, tmpnamelen; ! if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) { if (!(fd_table[fd]->flags & __FD_NONBLOCK) && ! ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) || ! (ret == -EALREADY) || (ret == -EAGAIN))) { pthread_sched_prevent(); /* queue pthread for a FDW_WAIT */ --- 1141,1154 ---- */ int connect(int fd, const struct sockaddr *name, int namelen) { ! struct sockaddr tmpname; ! int ret, tmpnamelen; ! if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) { if (!(fd_table[fd]->flags & __FD_NONBLOCK) && ! ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) || ! (ret == -EALREADY) || (ret == -EAGAIN))) { pthread_sched_prevent(); /* queue pthread for a FDW_WAIT */ *************** *** 1121,1131 **** tmpnamelen = sizeof(tmpname); /* OK now lets see if it really worked */ if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i, ! &tmpname, &tmpnamelen)) < OK) && (ret == -ENOTCONN)) { /* Get the error, this function should not fail */ machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET, ! SO_ERROR, &pthread_run->error, &tmpnamelen); } } else { SET_ERRNO(-ret); --- 1162,1180 ---- tmpnamelen = sizeof(tmpname); /* OK now lets see if it really worked */ if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i, ! &tmpname, &tmpnamelen)) < OK) ! && (ret == -ENOTCONN)) { /* Get the error, this function should not fail */ machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET, ! SO_ERROR, &ret, &tmpnamelen); ! SET_ERRNO(-ret); ! ret = NOTOK; ! } else { ! if( ret < 0 ) { ! SET_ERRNO(-ret); ! ret = NOTOK; ! } } } else { SET_ERRNO(-ret); *************** *** 1133,1140 **** } } fd_unlock(fd, FD_RDWR); ! } ! return(ret); } #endif --- 1182,1189 ---- } } fd_unlock(fd, FD_RDWR); ! } ! return(ret); } #endif *************** *** 1164,1170 **** } else { fd_unlock(fd, FD_RDWR); SET_ERRNO(-fd_kern); ! return(fd_kern); } } fd_unlock(fd, FD_RDWR); --- 1213,1219 ---- } else { fd_unlock(fd, FD_RDWR); SET_ERRNO(-fd_kern); ! return(NOTOK); } } fd_unlock(fd, FD_RDWR); *************** *** 1198,1205 **** int ret; if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { ! ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog); ! 
if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) { SET_ERRNO(-ret); ret = NOTOK; } --- 1247,1253 ---- int ret; if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) { ! if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) { SET_ERRNO(-ret); ret = NOTOK; } *************** *** 1246,1252 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1294,1300 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1311,1317 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1359,1365 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1405,1411 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1453,1459 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1471,1477 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1519,1525 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1536,1542 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1584,1590 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1603,1609 **** CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = -ETIMEDOUT; break; } pthread_sched_resume(); --- 1651,1657 ---- CLEAR_PF_DONE_EVENT(pthread_run); pthread_sched_resume(); SET_ERRNO(ETIMEDOUT); ! ret = NOTOK; break; } pthread_sched_resume(); *************** *** 1734,1744 **** */ int getsockopt(int fd, int level, int optname, void * optval, int * optlen) { ! int ret; ! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level, ! optname, optval, optlen)) < OK) { SET_ERRNO(-ret); ret = NOTOK; } --- 1782,1792 ---- */ int getsockopt(int fd, int level, int optname, void * optval, int * optlen) { ! int ret; ! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level, ! optname, optval, optlen)) < OK) { SET_ERRNO(-ret); ret = NOTOK; } *************** *** 1756,1772 **** */ int getsockname(int fd, struct sockaddr * name, int * naddrlen) { ! int ret; ! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { ! if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i, ! name, naddrlen)) < OK) { ! SET_ERRNO(-ret); ! ret = NOTOK; ! } ! fd_unlock(fd, FD_RDWR); ! } ! return ret; } #endif --- 1804,1820 ---- */ int getsockname(int fd, struct sockaddr * name, int * naddrlen) { ! int ret; ! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { ! if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i, ! name, naddrlen)) < OK) { ! SET_ERRNO(-ret); ! ret = NOTOK; ! } ! fd_unlock(fd, FD_RDWR); ! } ! return ret; } #endif *************** *** 1778,1793 **** */ int getpeername(int fd, struct sockaddr * peer, int * paddrlen) { ! int ret; ! 
if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { ! if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i, ! peer, paddrlen)) < OK) { SET_ERRNO(-ret); ret = NOTOK; ! } ! fd_unlock(fd, FD_READ); ! } return ret; } --- 1826,1841 ---- */ int getpeername(int fd, struct sockaddr * peer, int * paddrlen) { ! int ret; ! if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) { ! if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i, ! peer, paddrlen)) < OK) { SET_ERRNO(-ret); ret = NOTOK; ! } ! fd_unlock(fd, FD_READ); ! } return ret; } =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 pthread.c *** pthread.c 1995/12/13 05:53:01 1.1.1.1 --- pthread.c 1996/10/01 21:42:01 *************** *** 129,134 **** --- 129,160 ---- } + /*---------------------------------------------------------------------- + * Function: __pthread_is_valid + * Purpose: Scan the list of threads to see if a specified thread exists + * Args: + * pthread = The thread to scan for + * Returns: + * int = 1 if found, 0 if not + * Notes: + * The kernel is assumed to be locked + *----------------------------------------------------------------------*/ + int + __pthread_is_valid( pthread_t pthread ) + { + int rtn = 0; /* Assume not found */ + pthread_t t; + + for( t = pthread_link_list; t; t = t->pll ) { + if( t == pthread ) { + rtn = 1; /* Found it */ + break; + } + } + + return rtn; + } + /* ========================================================================== * __pthread_free() */ *************** *** 242,247 **** --- 268,277 ---- new_thread->next = NULL; new_thread->flags = 0; + /* PTHREADS spec says we start with cancellability on and deferred */ + SET_PF_CANCEL_STATE(new_thread, PTHREAD_CANCEL_ENABLE); + SET_PF_CANCEL_TYPE(new_thread, PTHREAD_CANCEL_DEFERRED); + new_thread->error_p = NULL; new_thread->sll = NULL; *************** *** 261,269 **** } return(retval); } - - /* ========================================================================== - * pthread_cancel() - * - * This routine will also require a sig_prevent/sig_check_and_resume() - */ --- 291,293 ---- =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread_init.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 pthread_init.c *** pthread_init.c 1996/03/13 04:33:10 1.1.1.1 --- pthread_init.c 1996/10/01 21:43:59 *************** *** 92,99 **** pthread_initial->next = NULL; pthread_initial->flags = 0; pthread_initial->pll = NULL; - pthread_initial->flags = 0; pthread_initial->sll = NULL; /* Ugly errno hack */ pthread_initial->error_p = &errno; --- 92,103 ---- pthread_initial->next = NULL; pthread_initial->flags = 0; pthread_initial->pll = NULL; pthread_initial->sll = NULL; + + /* PTHREADS spec says we start with cancellability on and deferred */ + SET_PF_CANCEL_STATE(pthread_initial, PTHREAD_CANCEL_ENABLE); + SET_PF_CANCEL_TYPE(pthread_initial, PTHREAD_CANCEL_DEFERRED); + /* Ugly errno hack */ pthread_initial->error_p = &errno; =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/pthread_join.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 pthread_join.c *** pthread_join.c 1995/12/13 05:53:07 1.1.1.1 --- pthread_join.c 1996/10/02 16:54:36 *************** *** 42,47 **** --- 42,49 ---- #include #include + static int testDeadlock( struct pthread_queue *queue, pthread_t target ); + /* 
========================================================================== * pthread_join() */ *************** *** 51,56 **** --- 53,64 ---- pthread_sched_prevent(); + /* Ensure they gave us a legal pthread pointer */ + if( ! __pthread_is_valid( pthread ) ) { + pthread_sched_resume(); + return(EINVAL); + } + /* Check that thread isn't detached already */ if (pthread->attr.flags & PTHREAD_DETACHED) { pthread_sched_resume(); *************** *** 62,81 **** * Note: This must happen after checking detached state. */ if (pthread_queue_remove(&pthread_dead_queue, pthread) != OK) { ! pthread_queue_enq(&(pthread->join_queue), pthread_run); ! pthread_resched_resume(PS_JOIN); ! pthread_sched_prevent(); ! ! if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) { ! pthread_queue_enq(&pthread_alloc_queue, pthread); ! pthread->attr.flags |= PTHREAD_DETACHED; ! pthread->state = PS_UNALLOCED; ! if (thread_return) { ! *thread_return = pthread->ret; ! } ! ret = OK; } else { ! ret = ESRCH; } } else { /* Just get the return value and detach the thread */ --- 70,98 ---- * Note: This must happen after checking detached state. */ if (pthread_queue_remove(&pthread_dead_queue, pthread) != OK) { ! ! /* Before we pend on the join, ensure there is no dead lock */ ! ! if( testDeadlock( &pthread_run->join_queue, pthread ) == NOTOK ) { ! ret = EDEADLK; } else { ! pthread_queue_enq(&(pthread->join_queue), pthread_run); ! SET_PF_AT_CANCEL_POINT(pthread_run); /* This is a cancel point */ ! pthread_resched_resume(PS_JOIN); ! CLEAR_PF_AT_CANCEL_POINT(pthread_run); /* No longer at cancel point */ ! pthread_sched_prevent(); ! ! if (pthread_queue_remove(&pthread_dead_queue, pthread) == OK) { ! pthread_queue_enq(&pthread_alloc_queue, pthread); ! pthread->attr.flags |= PTHREAD_DETACHED; ! pthread->state = PS_UNALLOCED; ! if (thread_return) { ! *thread_return = pthread->ret; ! } ! ret = OK; ! } else { ! ret = ESRCH; ! } } } else { /* Just get the return value and detach the thread */ *************** *** 89,92 **** --- 106,139 ---- } pthread_sched_resume(); return(ret); + } + + /*---------------------------------------------------------------------- + * Function: testDeadlock + * Purpose: recursive queue walk to check for deadlocks + * Args: + * queue = the queue to walk + * pthread = target to scan for + * Returns: + * OK = no deadlock, NOTOK = deadlock + * Notes: + *----------------------------------------------------------------------*/ + static int + testDeadlock( struct pthread_queue *queue, pthread_t target ) + { + pthread_t t; + + if( queue == NULL ) + return OK; /* Empty queue, obviously ok */ + + for( t = queue->q_next; t; t = t->next ) { + if( t == target ) + return NOTOK; /* bang, your dead */ + + if( testDeadlock( &t->join_queue, target ) == NOTOK ) { + return NOTOK; + } + } + + return OK; /* No deadlock */ } =================================================================== RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/select.c,v retrieving revision 1.1.1.1 diff -c -r1.1.1.1 select.c *** select.c 1996/03/05 08:29:14 1.1.1.1 --- select.c 1996/10/02 16:56:27 *************** *** 56,220 **** int select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { ! fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */ ! fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p; ! fd_set read_locks, write_locks, rdwr_locks; ! struct timespec timeout_time, current_time; ! struct timeval zero_timeout = { 0, 0 }; ! int i, j, ret = 0, got_all_locks = 1; ! 
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/select.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 select.c
*** select.c	1996/03/05 08:29:14	1.1.1.1
--- select.c	1996/10/02 16:56:27
***************
*** 56,220 ****
  int select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
  {
! 	fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */
! 	fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p;
! 	fd_set read_locks, write_locks, rdwr_locks;
! 	struct timespec timeout_time, current_time;
! 	struct timeval zero_timeout = { 0, 0 };
! 	int i, j, ret = 0, got_all_locks = 1;
! 	struct pthread_select_data data;
! 
! 	if (numfds > dtablesize) {
! 		numfds = dtablesize;
! 	}
! 
! 	data.nfds = 0;
! 	FD_ZERO(&data.readfds);
! 	FD_ZERO(&data.writefds);
! 	FD_ZERO(&data.exceptfds);
! 
! 	/* Do this first */
! 	if (timeout) {
  		machdep_gettimeofday(&current_time);
! 		timeout_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
! 		if ((timeout_time.tv_nsec = current_time.tv_nsec +
! 		  (timeout->tv_usec * 1000)) > 1000000000) {
! 			timeout_time.tv_nsec -= 1000000000;
! 			timeout_time.tv_sec++;
! 		}
! 	}
! 
! 	FD_ZERO(&read_locks);
! 	FD_ZERO(&write_locks);
! 	FD_ZERO(&rdwr_locks);
! 	FD_ZERO(&real_readfds);
! 	FD_ZERO(&real_writefds);
! 	FD_ZERO(&real_exceptfds);
! 
! 	/* lock readfds */
! 	if (readfds || writefds || exceptfds) {
! 		for (i = 0; i < numfds; i++) {
! 			if ((readfds && (FD_ISSET(i, readfds))) ||
! 			  (exceptfds && FD_ISSET(i, exceptfds))) {
! 				if (writefds && FD_ISSET(i ,writefds)) {
! 					if ((ret = fd_lock(i, FD_RDWR, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &rdwr_locks);
! 					FD_SET(fd_table[i]->fd.i,&real_writefds);
! 				} else {
! 					if ((ret = fd_lock(i, FD_READ, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &read_locks);
! 				}
! 				if (readfds && FD_ISSET(i,readfds)) {
! 					FD_SET(fd_table[i]->fd.i, &real_readfds);
! 				}
! 				if (exceptfds && FD_ISSET(i,exceptfds)) {
! 					FD_SET(fd_table[i]->fd.i, &real_exceptfds);
! 				}
! 				if (fd_table[i]->fd.i >= data.nfds) {
! 					data.nfds = fd_table[i]->fd.i + 1;
! 				}
! 			} else {
! 				if (writefds && FD_ISSET(i, writefds)) {
! 					if ((ret = fd_lock(i, FD_WRITE, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &write_locks);
! 					FD_SET(fd_table[i]->fd.i,&real_writefds);
! 				}
! 				if (fd_table[i]->fd.i >= data.nfds) {
! 					data.nfds = fd_table[i]->fd.i + 1;
! 				}
! 			}
! 		}
! 	}
! 
! 	if (got_all_locks) {
! 
! 		memcpy(&data.readfds,&real_readfds,sizeof(fd_set));
! 		memcpy(&data.writefds,&real_writefds,sizeof(fd_set));
! 		memcpy(&data.exceptfds,&real_exceptfds,sizeof(fd_set));
! 
! 		real_readfds_p = (readfds == NULL) ? NULL : &real_readfds;
! 		real_writefds_p = (writefds == NULL) ? NULL : &real_writefds;
! 		real_exceptfds_p = (exceptfds == NULL) ? NULL : &real_exceptfds;
! 
! 		if ((ret = machdep_sys_select(data.nfds, real_readfds_p,
! 		  real_writefds_p, real_exceptfds_p, &zero_timeout)) == OK) {
! 
! 			pthread_sched_prevent();
! 
! 			real_exceptfds_p = (exceptfds == NULL) ? NULL : &data.exceptfds;
! 			real_writefds_p = (writefds == NULL) ? NULL : &data.writefds;
! 			real_readfds_p = (readfds == NULL) ? NULL : &data.readfds;
! 
! 			pthread_queue_enq(&fd_wait_select, pthread_run);
! 			pthread_run->data.select_data = &data;
! 			SET_PF_WAIT_EVENT(pthread_run);
! 
! 			if (timeout) {
! 				machdep_gettimeofday(&current_time);
! 				sleep_schedule(&current_time, &timeout_time);
! 
! 				pthread_resched_resume(PS_SELECT_WAIT);
! 
! 				/* We're awake */
! 				CLEAR_PF_DONE_EVENT(pthread_run);
! 				if (sleep_cancel(pthread_run) == NOTOK) {
! 					ret = OK;
! 				} else {
! 					ret = data.nfds;
! 				}
! 			} else {
! 				pthread_resched_resume(PS_SELECT_WAIT);
! 				CLEAR_PF_DONE_EVENT(pthread_run);
! 				ret = data.nfds; /* XXX ??? snl */
! 			}
! 		} else if (ret < 0) {
! 			SET_ERRNO(-ret);
! 			ret = NOTOK;
! 		}
! 	}
! 
! 	/* clean up the locks */
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&read_locks)) fd_unlock(i,FD_READ);
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&rdwr_locks)) fd_unlock(i,FD_RDWR);
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&write_locks)) fd_unlock(i,FD_WRITE);
! 
! 	if (ret > 0) {
! 		if (readfds != NULL) {
! 			for (i = 0; i < numfds; i++) {
! 				if (! (FD_ISSET(i,readfds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_readfds_p)))
! 					FD_CLR(i,readfds);
! 			}
! 		}
! 		if (writefds != NULL) {
! 			for (i = 0; i < numfds; i++)
! 				if (! (FD_ISSET(i,writefds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_writefds_p)))
! 					FD_CLR(i,writefds);
! 		}
! 		if (exceptfds != NULL) {
! 			for (i = 0; i < numfds; i++)
! 				if (! (FD_ISSET(i,exceptfds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_exceptfds_p)))
! 					FD_CLR(i,exceptfds);
! 		}
! 	} else {
! 		if (exceptfds != NULL) FD_ZERO(exceptfds);
! 		if (writefds != NULL) FD_ZERO(writefds);
! 		if (readfds != NULL) FD_ZERO(readfds);
  	}
! 	return(ret);
  }
--- 56,223 ----
  int select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
    struct timeval *timeout)
  {
! 	fd_set real_exceptfds, real_readfds, real_writefds; /* mapped fd_sets */
! 	fd_set * real_readfds_p, * real_writefds_p, * real_exceptfds_p;
! 	fd_set read_locks, write_locks, rdwr_locks;
! 	struct timespec timeout_time, current_time;
! 	struct timeval zero_timeout = { 0, 0 };
! 	int i, j, ret = 0, got_all_locks = 1;
! 	struct pthread_select_data data;
! 
! 	if (numfds > dtablesize) {
! 		numfds = dtablesize;
! 	}
! 
! 	data.nfds = 0;
! 	FD_ZERO(&data.readfds);
! 	FD_ZERO(&data.writefds);
! 	FD_ZERO(&data.exceptfds);
! 
! 	/* Do this first */
! 	if (timeout) {
! 		machdep_gettimeofday(&current_time);
! 		timeout_time.tv_sec = current_time.tv_sec + timeout->tv_sec;
! 		if ((timeout_time.tv_nsec = current_time.tv_nsec +
! 		  (timeout->tv_usec * 1000)) > 1000000000) {
! 			timeout_time.tv_nsec -= 1000000000;
! 			timeout_time.tv_sec++;
! 		}
! 	}
! 
! 	FD_ZERO(&read_locks);
! 	FD_ZERO(&write_locks);
! 	FD_ZERO(&rdwr_locks);
! 	FD_ZERO(&real_readfds);
! 	FD_ZERO(&real_writefds);
! 	FD_ZERO(&real_exceptfds);
! 
! 	/* lock readfds */
! 	if (readfds || writefds || exceptfds) {
! 		for (i = 0; i < numfds; i++) {
! 			if ((readfds && (FD_ISSET(i, readfds))) ||
! 			  (exceptfds && FD_ISSET(i, exceptfds))) {
! 				if (writefds && FD_ISSET(i ,writefds)) {
! 					if ((ret = fd_lock(i, FD_RDWR, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &rdwr_locks);
! 					FD_SET(fd_table[i]->fd.i,&real_writefds);
! 				} else {
! 					if ((ret = fd_lock(i, FD_READ, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &read_locks);
! 				}
! 				if (readfds && FD_ISSET(i,readfds)) {
! 					FD_SET(fd_table[i]->fd.i, &real_readfds);
! 				}
! 				if (exceptfds && FD_ISSET(i,exceptfds)) {
! 					FD_SET(fd_table[i]->fd.i, &real_exceptfds);
! 				}
! 				if (fd_table[i]->fd.i >= data.nfds) {
! 					data.nfds = fd_table[i]->fd.i + 1;
! 				}
! 			} else {
! 				if (writefds && FD_ISSET(i, writefds)) {
! 					if ((ret = fd_lock(i, FD_WRITE, NULL)) != OK) {
! 						got_all_locks = 0;
! 						break;
! 					}
! 					FD_SET(i, &write_locks);
! 					FD_SET(fd_table[i]->fd.i,&real_writefds);
! 					if (fd_table[i]->fd.i >= data.nfds) {
! 						data.nfds = fd_table[i]->fd.i + 1;
! 					}
! 				}
! 			}
! 		}
! 	}
! 
! 	if (got_all_locks) {
! 		memcpy(&data.readfds,&real_readfds,sizeof(fd_set));
! 		memcpy(&data.writefds,&real_writefds,sizeof(fd_set));
! 		memcpy(&data.exceptfds,&real_exceptfds,sizeof(fd_set));
! 
! 		real_readfds_p = (readfds == NULL) ? NULL : &real_readfds;
! 		real_writefds_p = (writefds == NULL) ? NULL : &real_writefds;
! 		real_exceptfds_p = (exceptfds == NULL) ? NULL : &real_exceptfds;
! 
! 		if ((ret = machdep_sys_select(data.nfds, real_readfds_p,
! 		  real_writefds_p, real_exceptfds_p,
! 		  &zero_timeout)) == OK) {
! 			pthread_sched_prevent();
! 
! 			real_exceptfds_p = (exceptfds == NULL) ? NULL : &data.exceptfds;
! 			real_writefds_p = (writefds == NULL) ? NULL : &data.writefds;
! 			real_readfds_p = (readfds == NULL) ? NULL : &data.readfds;
! 
! 			pthread_queue_enq(&fd_wait_select, pthread_run);
! 			pthread_run->data.select_data = &data;
! 			SET_PF_WAIT_EVENT(pthread_run);
! 
! 			if (timeout) {
  				machdep_gettimeofday(&current_time);
! 				sleep_schedule(&current_time, &timeout_time);
! 
! 				SET_PF_AT_CANCEL_POINT(pthread_run);
! 				pthread_resched_resume(PS_SELECT_WAIT);
! 				CLEAR_PF_AT_CANCEL_POINT(pthread_run);
! 
! 				/* We're awake */
! 				CLEAR_PF_DONE_EVENT(pthread_run);
! 				if (sleep_cancel(pthread_run) == NOTOK) {
! 					ret = OK;
! 				} else {
! 					ret = data.nfds;
! 				}
! 			} else {
! 				SET_PF_AT_CANCEL_POINT(pthread_run);
! 				pthread_resched_resume(PS_SELECT_WAIT);
! 				CLEAR_PF_AT_CANCEL_POINT(pthread_run);
! 				CLEAR_PF_DONE_EVENT(pthread_run);
! 				ret = data.nfds; /* XXX ??? snl */
! 			}
! 		} else if (ret < 0) {
! 			SET_ERRNO(-ret);
! 			ret = NOTOK;
! 		}
! 	}
! 
! 	/* clean up the locks */
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&read_locks)) fd_unlock(i,FD_READ);
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&rdwr_locks)) fd_unlock(i,FD_RDWR);
! 	for (i = 0; i < numfds; i++)
! 		if (FD_ISSET(i,&write_locks)) fd_unlock(i,FD_WRITE);
! 
! 	if (ret > 0) {
! 		if (readfds != NULL) {
! 			for (i = 0; i < numfds; i++) {
! 				if (! (FD_ISSET(i,readfds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_readfds_p)))
! 					FD_CLR(i,readfds);
! 			}
! 		}
! 		if (writefds != NULL) {
! 			for (i = 0; i < numfds; i++)
! 				if (! (FD_ISSET(i,writefds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_writefds_p)))
! 					FD_CLR(i,writefds);
! 		}
! 		if (exceptfds != NULL) {
! 			for (i = 0; i < numfds; i++)
! 				if (! (FD_ISSET(i,exceptfds) &&
! 				  FD_ISSET(fd_table[i]->fd.i,real_exceptfds_p)))
! 					FD_CLR(i,exceptfds);
  		}
+ 	} else {
+ 		if (exceptfds != NULL) FD_ZERO(exceptfds);
+ 		if (writefds != NULL) FD_ZERO(writefds);
+ 		if (readfds != NULL) FD_ZERO(readfds);
+ 	}
! 	return(ret);
  }
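The reworked select() keeps the original two-step strategy: map the caller's descriptors, probe them once through machdep_sys_select() with a zero timeout, and only when nothing is ready queue the thread on fd_wait_select and reschedule, now inside a cancel-point bracket. The probe half is ordinary select() usage; a standalone sketch, independent of the library internals (readable_now is an invented name):

    #include <stdio.h>
    #include <sys/select.h>
    #include <sys/time.h>

    /* Returns >0 if fd is readable right now, 0 if not, -1 on error.
     * A zero timeval makes select() poll instead of block. */
    static int readable_now(int fd)
    {
        fd_set readfds;
        struct timeval zero_timeout = { 0, 0 };

        FD_ZERO(&readfds);
        FD_SET(fd, &readfds);
        return select(fd + 1, &readfds, NULL, NULL, &zero_timeout);
    }

    int main(void)
    {
        /* Probe stdin; in the library the same zero-timeout probe decides
         * whether the calling thread has to be parked and rescheduled. */
        printf("stdin %s ready\n", readable_now(0) > 0 ? "is" : "is not");
        return 0;
    }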
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/sig.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 sig.c
*** sig.c	1996/03/13 04:33:13	1.1.1.1
--- sig.c	1996/10/03 01:07:54
***************
*** 301,307 ****
--- 301,310 ----
  	pthread_run->data.sigwait = set;
  	pthread_run->ret = sig;
  
+ 	SET_PF_AT_CANCEL_POINT(pthread_run);	/* This is a cancel point */
  	pthread_resched_resume(PS_SIGWAIT);
+ 	CLEAR_PF_AT_CANCEL_POINT(pthread_run);	/* No longer at cancel point */
+ 
  	return(OK);
  }
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/signal.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 signal.c
*** signal.c	1996/03/13 04:33:17	1.1.1.1
--- signal.c	1996/10/03 17:30:16
***************
*** 72,77 ****
--- 72,78 ----
  static void sig_handler(int signal);
  static void set_thread_timer();
+ static void __cleanup_after_resume( void );
  void sig_prevent(void);
  void sig_resume(void);
***************
*** 482,502 ****
  	}
  	}
  
! 	/* Only bother if we are truly unlocking the kernel */
! 	while (!(--pthread_kernel_lock)) {
! 		if (sig_to_process) {
! 			/* if (SIG_ANY(sig_to_process)) { */
! 			pthread_kernel_lock++;
! 			sig_handler(0);
! 			continue;
! 		}
! 		if (pthread_run && pthread_run->sigcount) {
! 			pthread_kernel_lock++;
! 			pthread_sig_process();
! 			continue;
! 		}
! 		break;
! 	}
  }
  
  /* ==========================================================================
--- 483,489 ----
  	}
  	}
  
! 	__cleanup_after_resume();
  }
  
  /* ==========================================================================
***************
*** 508,530 ****
  void pthread_resched_resume(enum pthread_state state)
  {
  	pthread_run->state = state;
- 	sig_handler(SIGVTALRM);
  
! 	/* Only bother if we are truely unlocking the kernel */
! 	while (!(--pthread_kernel_lock)) {
! 		if (sig_to_process) {
! 			/* if (SIG_ANY(sig_to_process)) { */
! 			pthread_kernel_lock++;
! 			sig_handler(0);
! 			continue;
! 		}
! 		if (pthread_run && pthread_run->sigcount) {
! 			pthread_kernel_lock++;
! 			pthread_sig_process();
! 			continue;
! 		}
! 		break;
  	}
  }
  
  /* ==========================================================================
--- 495,523 ----
  void pthread_resched_resume(enum pthread_state state)
  {
  	pthread_run->state = state;
  
! 	/* Since we are about to block this thread, let's see if we are
! 	 * at a cancel point and if we've been cancelled.
! 	 * Avoid cancelling dead or unalloced threads.
! 	 */
! 	if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
! 	    TEST_PTHREAD_IS_CANCELLABLE(pthread_run) &&
! 	    state != PS_DEAD && state != PS_UNALLOCED ) {
! 
! 		/* Set this flag to avoid recursively calling pthread_exit */
! 		/* We have to set this flag here because we will unlock the
! 		 * kernel prior to calling pthread_cancel_internal.
! 		 */
! 		SET_PF_RUNNING_TO_CANCEL(pthread_run);
! 
! 		pthread_run->old_state = state;	/* unlock needs this data */
! 		pthread_sched_resume();		/* Unlock kernel before cancel */
! 		pthread_cancel_internal( 1 );	/* free locks and exit */
  	}
+ 
+ 	sig_handler(SIGVTALRM);
+ 
+ 	__cleanup_after_resume();
  }
  
  /* ==========================================================================
***************
*** 532,537 ****
--- 525,543 ----
   */
  void pthread_sched_resume()
  {
+ 	__cleanup_after_resume();
+ }
+ 
+ /*----------------------------------------------------------------------
+  * Function:	__cleanup_after_resume
+  * Purpose:	cleanup kernel locks after a resume
+  * Args:	void
+  * Returns:	void
+  * Notes:
+  *----------------------------------------------------------------------*/
+ static void
+ __cleanup_after_resume( void )
+ {
  	/* Only bother if we are truely unlocking the kernel */
  	while (!(--pthread_kernel_lock)) {
  		/* if (SIG_ANY(sig_to_process)) { */
***************
*** 546,551 ****
--- 552,568 ----
  			continue;
  		}
  		break;
+ 	}
+ 
+ 	if( pthread_run == NULL )
+ 		return;	/* Must be during init processing */
+ 
+ 	/* Test for cancel that should be handled now */
+ 
+ 	if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ 	    TEST_PTHREAD_IS_CANCELLABLE(pthread_run) ) {
+ 		/* Kernel is already unlocked */
+ 		pthread_cancel_internal( 1 );	/* free locks and exit */
  	}
  }
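The new pthread_resched_resume() refuses to put a thread to sleep when a pending cancel is allowed to fire: cancellation must be enabled, a cancel must be pending, the type must be asynchronous or the thread must be sitting at a cancel point, and the thread must not already be running its own cancellation path. That decision, the TEST_PTHREAD_IS_CANCELLABLE() test plus the PF_RUNNING_TO_CANCEL guard, can be restated with plain booleans; struct cancel_info and should_cancel_now below are invented for illustration:

    #include <stdio.h>

    /* Stand-in for the per-thread flag word used by the library. */
    struct cancel_info {
        int enabled;          /* cancellation state: 1 = enabled         */
        int asynchronous;     /* cancellation type: 1 = asynchronous     */
        int cancel_pending;   /* pthread_cancel() has been called on us  */
        int at_cancel_point;  /* currently inside a cancellation point   */
        int running_cancel;   /* already executing the cancel path       */
    };

    /* Act on a pending cancel only when it is enabled and either asynchronous
     * or deferred while at a cancel point, and never re-enter the cancel path. */
    static int should_cancel_now(const struct cancel_info *c)
    {
        if (c->running_cancel || !c->enabled || !c->cancel_pending)
            return 0;
        return c->asynchronous || c->at_cancel_point;
    }

    int main(void)
    {
        struct cancel_info c = { 1, 0, 1, 1, 0 };  /* deferred, pending, at a point */

        printf("cancel now? %s\n", should_cancel_now(&c) ? "yes" : "no");
        return 0;
    }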
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/sleep.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 sleep.c
*** sleep.c	1996/03/11 08:33:32	1.1.1.1
--- sleep.c	1996/10/03 01:14:58
***************
*** 249,255 ****
--- 249,257 ----
  	/* Reschedule thread */
  	SET_PF_WAIT_EVENT(pthread_run);
+ 	SET_PF_AT_CANCEL_POINT(pthread_run);	/* This is a cancel point */
  	pthread_resched_resume(PS_SLEEP_WAIT);
+ 	CLEAR_PF_AT_CANCEL_POINT(pthread_run);	/* No longer at cancel point */
  	CLEAR_PF_DONE_EVENT(pthread_run);
  
  	/* Return actual time slept */
***************
*** 332,338 ****
  			current_time.tv_sec++;
  		}
  		machdep_start_timer(&(current_time),
! 		  &(pthread_sleep->wakeup_time));
  	}
  	} else {
  		for (pthread_last = pthread_sleep; pthread_last;
--- 334,340 ----
  			current_time.tv_sec++;
  		}
  		machdep_start_timer(&(current_time),
! 		  &(pthread_sleep->wakeup_time));
  	}
  	} else {
  		for (pthread_last = pthread_sleep; pthread_last;
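sleep.c brackets the sleeping thread's reschedule with SET_PF_AT_CANCEL_POINT/CLEAR_PF_AT_CANCEL_POINT, so a deferred cancel is acted on while the thread is blocked rather than at some arbitrary later instruction. The observable behaviour is the standard POSIX one; a quick demonstration using plain POSIX calls, not part of the patch, cancels a sleeping thread and checks its exit status:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Sleeps forever; sleep() is a cancellation point, so a deferred cancel
     * is honoured while the thread is blocked here. */
    static void *sleeper(void *arg)
    {
        (void)arg;
        for (;;)
            sleep(1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        void *status;

        pthread_create(&t, NULL, sleeper, NULL);
        sleep(1);                      /* let the thread reach its sleep */
        pthread_cancel(t);
        pthread_join(t, &status);

        printf("thread %s cancelled\n",
               status == PTHREAD_CANCELED ? "was" : "was NOT");
        return 0;
    }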
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/stat.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 stat.c
*** stat.c	1995/09/21 02:36:05	1.1.1.1
--- stat.c	1996/06/04 19:17:33
***************
*** 43,48 ****
--- 43,49 ----
  #include 
  
  struct stat;
+ struct statfs;
  
  /* ==========================================================================
   * fstat()
***************
*** 91,95 ****
--- 92,115 ----
  	}
  	return(ret);
+ }
+ 
+ /* ==========================================================================
+  * fstatfs()
+  *
+  * Might want to indirect this.
+  */
+ int fstatfs(int fd, struct statfs *buf)
+ {
+ 	int ret;
+ 
+ 	if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ 		if ((ret = machdep_sys_fstatfs(fd_table[fd]->fd.i, buf)) < OK) {
+ 			SET_ERRNO(-ret);
+ 			ret = NOTOK;
+ 		}
+ 		fd_unlock(fd, FD_READ);
+ 	}
+ 	return(ret);
  }
===================================================================
RCS file: /usr/cvssrc/pthreads-1_60_beta5/pthreads/wait.c,v
retrieving revision 1.1.1.1
diff -c -r1.1.1.1 wait.c
*** wait.c	1995/02/21 08:07:24	1.1.1.1
--- wait.c	1996/10/03 01:20:02
***************
*** 103,109 ****
--- 103,111 ----
  	pthread_queue_enq(&wait_queue, pthread_run);
  
  	/* reschedule unlocks scheduler */
+ 	SET_PF_AT_CANCEL_POINT(pthread_run);	/* This is a cancel point */
  	pthread_resched_resume(PS_WAIT_WAIT);
+ 	CLEAR_PF_AT_CANCEL_POINT(pthread_run);	/* No longer at cancel point */
  	pthread_sched_prevent();
  }
***************
*** 126,132 ****
--- 128,136 ----
  	pthread_queue_enq(&wait_queue, pthread_run);
  
  	/* reschedule unlocks scheduler */
+ 	SET_PF_AT_CANCEL_POINT(pthread_run);	/* This is a cancel point */
  	pthread_resched_resume(PS_WAIT_WAIT);
+ 	CLEAR_PF_AT_CANCEL_POINT(pthread_run);	/* No longer at cancel point */
  	pthread_sched_prevent();
  }
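The fstatfs() added to stat.c above has the same shape as every other descriptor wrapper in the library: lock the user fd, issue the underlying call on the mapped kernel fd, turn a negative return into an errno and a failure code, unlock. A self-contained sketch of that shape, with a single pthread mutex standing in for fd_lock()/fd_unlock() and fstat() standing in for the machdep call (locked_fstat and demo_fd_lock are invented names):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static pthread_mutex_t demo_fd_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lock, make the real call, report failure, unlock: the wrapper shape. */
    static int locked_fstat(int fd, struct stat *st)
    {
        int ret;

        pthread_mutex_lock(&demo_fd_lock);
        ret = fstat(fd, st);            /* stands in for machdep_sys_fstatfs() */
        pthread_mutex_unlock(&demo_fd_lock);
        return ret;                     /* 0 on success, -1 with errno set */
    }

    int main(void)
    {
        struct stat st;

        if (locked_fstat(0, &st) == 0)
            printf("stdin: inode %ld\n", (long)st.st_ino);
        else
            perror("fstat");
        return 0;
    }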