Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/mips')
5 files changed, 237 insertions, 128 deletions
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
index f112b8a39..166a6c6ae 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
@@ -1,5 +1,5 @@
 /* Machine-specific pthread type layouts.  MIPS version.
-   Copyright (C) 2005 Free Software Foundation, Inc.
+   Copyright (C) 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,7 +20,7 @@
 #ifndef _BITS_PTHREADTYPES_H
 #define _BITS_PTHREADTYPES_H	1
 
-#include <sgidefs.h>
+#include <endian.h>
 
 #if _MIPS_SIM == _ABI64
 # define __SIZEOF_PTHREAD_ATTR_T 56
@@ -56,6 +56,7 @@ typedef union
   long int __align;
 } pthread_attr_t;
 
+
 #if _MIPS_SIM == _ABI64
 typedef struct __pthread_internal_list
 {
@@ -69,6 +70,7 @@ typedef struct __pthread_internal_slist
 } __pthread_slist_t;
 #endif
 
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -87,7 +89,7 @@ typedef union
 #if _MIPS_SIM == _ABI64
     int __spins;
     __pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV      1
+# define __PTHREAD_MUTEX_HAVE_PREV	1
 #else
     unsigned int __nusers;
     __extension__ union
@@ -157,9 +159,9 @@ typedef union
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
     int __writer;
-    int __pad1;
+    int __shared;
+    unsigned long int __pad1;
     unsigned long int __pad2;
-    unsigned long int __pad3;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
     unsigned int __flags;
@@ -173,9 +175,21 @@ typedef union
     unsigned int __writer_wakeup;
     unsigned int __nr_readers_queued;
     unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+    unsigned char __pad1;
+    unsigned char __pad2;
+    unsigned char __shared;
     /* FLAGS must stay at this position in the structure to maintain
        binary compatibility.  */
-    unsigned int __flags;
+    unsigned char __flags;
+#else
+    /* FLAGS must stay at this position in the structure to maintain
+       binary compatibility.  */
+    unsigned char __flags;
+    unsigned char __shared;
+    unsigned char __pad1;
+    unsigned char __pad2;
+#endif
     int __writer;
   } __data;
 # endif
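The rwlock hunk above splits the former whole-word "unsigned int __flags" into four chars, ordered by endianness so that the new one-byte __flags keeps overlapping the least significant byte of the old int, which is where old binaries stored their small flag values. A stand-alone sketch of that invariant, not part of the patch; the union and field names are illustrative only:

    /* Sketch: why the char order differs by endianness. */
    #include <stdio.h>
    #include <endian.h>

    union compat_word
    {
      unsigned int __old_flags;            /* pre-patch layout: whole-int flags */
      struct                               /* post-patch byte layout */
      {
    #if __BYTE_ORDER == __BIG_ENDIAN
        unsigned char __pad1, __pad2, __shared, __flags;
    #else
        unsigned char __flags, __shared, __pad1, __pad2;
    #endif
      } __new;
    };

    int main (void)
    {
      union compat_word w = { .__old_flags = 0x05 };  /* old code writes the int */
      /* Prints 0x05 on either byte order: __flags tracks the int's low byte. */
      printf ("__flags = 0x%02x\n", w.__new.__flags);
      return 0;
    }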
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
index c4440f9e9..af43a6048 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -29,9 +29,6 @@
 /* Value returned if `sem_open' failed.  */
 #define SEM_FAILED      ((sem_t *) 0)
 
-/* Maximum value the semaphore can have.  */
-#define SEM_VALUE_MAX   (2147483647)
-
 typedef union
 {
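The hunk above drops SEM_VALUE_MAX from the MIPS bits/semaphore.h; the constant (previously hard-coded here as 2147483647) is now expected to come from the generic limits machinery instead of this architecture header. A quick stand-alone sanity check, assuming a glibc/uClibc-style limits.h:

    #include <limits.h>
    #include <stdio.h>

    int main (void)
    {
    #ifdef SEM_VALUE_MAX
      /* The value this header used to define was 2147483647. */
      printf ("SEM_VALUE_MAX = %d\n", SEM_VALUE_MAX);
    #else
      puts ("SEM_VALUE_MAX not provided by <limits.h> on this toolchain");
    #endif
      return 0;
    }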
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
index 7edb28794..01bcf4120 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008,
+   2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -24,160 +25,260 @@
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 #include <sysdep.h>
-
+#include <bits/kernel-features.h>
 
 #define FUTEX_WAIT		0
 #define FUTEX_WAKE		1
 #define FUTEX_REQUEUE		3
 #define FUTEX_CMP_REQUEUE	4
-
-/* Initializer for compatibility lock.	*/
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
-#define lll_futex_wait(futexp, val) \
+#define FUTEX_WAKE_OP		5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1)
+#define FUTEX_LOCK_PI		6
+#define FUTEX_UNLOCK_PI		7
+#define FUTEX_TRYLOCK_PI	8
+#define FUTEX_WAIT_BITSET	9
+#define FUTEX_WAKE_BITSET	10
+#define FUTEX_PRIVATE_FLAG	128
+#define FUTEX_CLOCK_REALTIME	256
+
+#define FUTEX_BITSET_MATCH_ANY	0xffffffff
+
+/* Values for 'private' parameter of locking macros.  Yes, the
+   definition seems to be backwards.  But it is not.  The bit will be
+   reversed before passing to the system call.  */
+#define LLL_PRIVATE	0
+#define LLL_SHARED	FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private.  */
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+#  define __lll_private_flag(fl, private) \
+  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+#  define __lll_private_flag(fl, private) \
+  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+#  define __lll_private_flag(fl, private) \
+  (__builtin_constant_p (private)					      \
+   ? ((private) == 0							      \
+      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \
+      : (fl))								      \
+   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)				      \
+	      & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
+
+
+#define lll_futex_wait(futexp, val, private) \
+  lll_futex_timed_wait(futexp, val, NULL, private)
+
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
   ({									      \
     INTERNAL_SYSCALL_DECL (__err);					      \
     long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
-			      (futexp), FUTEX_WAIT, (val), 0);		      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),	      \
+			      __lll_private_flag (FUTEX_WAIT, private),	      \
+			      (val), (timespec));			      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
   })
 
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
   ({									      \
     INTERNAL_SYSCALL_DECL (__err);					      \
     long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
-			      (futexp), FUTEX_WAIT, (val), (timespec));	      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),	      \
+			      __lll_private_flag (FUTEX_WAKE, private),	      \
+			      (nr), 0);					      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
   })
 
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+  do									      \
+    {									      \
+      int *__futexp = &(futexv);					      \
+      atomic_or (__futexp, FUTEX_OWNER_DIED);				      \
+      lll_futex_wake (__futexp, 1, private);				      \
+    }									      \
+  while (0)
+
+/* Returns non-zero if error happened, zero if success.  */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
   ({									      \
     INTERNAL_SYSCALL_DECL (__err);					      \
     long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \
-			      (futexp), FUTEX_WAKE, (nr), 0);		      \
-    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;		      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp),	      \
+			      __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+			      (nr_wake), (nr_move), (mutex), (val));	      \
+    INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
 
 /* Returns non-zero if error happened, zero if success.  */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
   ({									      \
     INTERNAL_SYSCALL_DECL (__err);					      \
     long int __ret;							      \
-    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \
-			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \
-			      (nr_move), (mutex), (val));		      \
+									      \
+    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \
+			      __lll_private_flag (FUTEX_WAKE_OP, private),    \
+			      (nr_wake), (nr_wake2), (futexp2),		      \
+			      FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);		      \
     INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \
   })
-
 
 static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
 }
-#define lll_mutex_trylock(lock)	__lll_mutex_trylock (&(lock))
+#define lll_trylock(lock)	__lll_trylock (&(lock))
 
 
 static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
 {
   return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
 }
-#define lll_mutex_cond_trylock(lock)	__lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock)	__lll_cond_trylock (&(lock))
 
 
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-
-static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+static inline int __attribute__((always_inline))
+__lll_robust_trylock(int *futex, int id)
 {
-  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    __lll_lock_wait (futex);
+  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
 }
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_robust_trylock(lock, id) \
+  __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define __lll_lock(futex, private)					      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex,     \
+								1, 0), 0))    \
+      {									      \
+	if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
+	  __lll_lock_wait_private (__futex);				      \
+	else								      \
+	  __lll_lock_wait (__futex, private);				      \
+      }									      \
+  }))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private)				      \
+  ({									      \
+    int *__futex = (futex);						      \
+    int __val = 0;							      \
+									      \
+    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+								0), 0))	      \
+      __val = __lll_robust_lock_wait (__futex, private);		      \
+    __val;								      \
+  })
+#define lll_robust_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), id, private)
 
 static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
 {
   if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
-    __lll_lock_wait (futex);
+    __lll_lock_wait (futex, private);
 }
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
 
 
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
-	attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+				 int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+					int private) attribute_hidden;
 
 static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
 {
   int result = 0;
   if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
-    result = __lll_timedlock_wait (futex, abstime);
+    result = __lll_timedlock_wait (futex, abstime, private);
   return result;
 }
-#define lll_mutex_timedlock(futex, abstime) \
-  __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+  __lll_timedlock (&(futex), abstime, private)
 
 
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
-{
-  int val = atomic_exchange_rel (futex, 0);
-  if (__builtin_expect (val > 1, 0))
-    lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
+static inline int __attribute__ ((always_inline))
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+			int id, int private)
 {
-  (void) atomic_exchange_rel (futex, 0);
-  lll_futex_wake (futex, 1);
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+    result = __lll_robust_timedlock_wait (futex, abstime, private);
+  return result;
 }
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+#define lll_robust_timedlock(futex, abstime, id, private) \
+  __lll_robust_timedlock (&(futex), abstime, id, private)
+
+
+#define __lll_unlock(futex, private)					      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    int __val = atomic_exchange_rel (__futex, 0);			      \
+									      \
+    if (__builtin_expect (__val > 1, 0))				      \
+      lll_futex_wake (__futex, 1, private);				      \
+  }))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
+
+
+#define __lll_robust_unlock(futex, private)				      \
+  ((void) ({								      \
+    int *__futex = (futex);						      \
+    int __val = atomic_exchange_rel (__futex, 0);			      \
+									      \
+    if (__builtin_expect (__val & FUTEX_WAITERS, 0))			      \
+      lll_futex_wake (__futex, 1, private);				      \
+  }))
+#define lll_robust_unlock(futex, private) \
+  __lll_robust_unlock(&(futex), private)
+
+
+#define lll_islocked(futex) \
   (futex != 0)
 
 /* Our internal lock implementation is identical to the binary-compatible
    mutex implementation. */
 
-/* Type for lock object.  */
-typedef int lll_lock_t;
-
 /* Initializers for lock.  */
 #define LLL_LOCK_INITIALIZER		(0)
 #define LLL_LOCK_INITIALIZER_LOCKED	(1)
 
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
 /* The states of a lock are:
     0  -  untaken
     1  -  taken by one user
    >1  -  taken by more users */
 
-#define lll_trylock(lock)	lll_mutex_trylock (lock)
-#define lll_lock(lock)		lll_mutex_lock (lock)
-#define lll_unlock(lock)	lll_mutex_unlock (lock)
-#define lll_islocked(lock)	lll_mutex_islocked (lock)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
    wakeup when the clone terminates.  The memory location contains the
    thread ID while the clone is running and is reset to zero
    afterwards.	*/
 #define lll_wait_tid(tid) \
-  do {					\
-    __typeof (tid) __tid;		\
-    while ((__tid = (tid)) != 0)	\
-      lll_futex_wait (&(tid), __tid);	\
+  do {							\
+    __typeof (tid) __tid;				\
+    while ((__tid = (tid)) != 0)			\
+      lll_futex_wait (&(tid), __tid, LLL_SHARED);	\
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
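Between the two hunks of this file, a word on the LLL_PRIVATE/LLL_SHARED definitions introduced above: they look backwards, as the header comment warns, because __lll_private_flag XORs the private argument against an opcode that already has FUTEX_PRIVATE_FLAG set (in the __ASSUME_PRIVATE_FUTEX configuration outside libc/ld.so). A stand-alone sketch of just that arithmetic, with the constants copied from the hunk above:

    #include <stdio.h>

    #define FUTEX_WAIT          0
    #define FUTEX_PRIVATE_FLAG  128
    #define LLL_PRIVATE         0
    #define LLL_SHARED          FUTEX_PRIVATE_FLAG

    /* The __ASSUME_PRIVATE_FUTEX branch of __lll_private_flag. */
    static int lll_private_flag (int fl, int private)
    {
      return (fl | FUTEX_PRIVATE_FLAG) ^ private;
    }

    int main (void)
    {
      /* LLL_PRIVATE (0) leaves the bit set; LLL_SHARED (128) clears it. */
      printf ("private wait op: %d\n", lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)); /* 128 */
      printf ("shared  wait op: %d\n", lll_private_flag (FUTEX_WAIT, LLL_SHARED));  /* 0 */
      return 0;
    }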
@@ -191,26 +292,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
     __res;						\
   })
-
-/* Conditional variable handling.  */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
-     attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
-				 const struct timespec *abstime)
-     attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
-     attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
-     attribute_hidden;
-
-#define lll_cond_wait(cond) \
-  __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
-  __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
-  __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
-  __lll_cond_broadcast (cond)
-
 
 #endif	/* lowlevellock.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
index 649b752f5..ddfd32bdb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
   pthread_once_t *once_control = (pthread_once_t *) arg;
 
   *once_control = 0;
-  lll_futex_wake (once_control, INT_MAX);
+  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 }
 
 
@@ -65,7 +65,7 @@ __pthread_once (once_control, init_routine)
 	  if (((oldval ^ newval) & -4) == 0)
 	    {
 	      /* Same generation, some other thread was faster. Wait.  */
-	      lll_futex_wait (once_control, newval);
+	      lll_futex_wait (once_control, newval, LLL_PRIVATE);
 	      continue;
 	    }
 	}
@@ -84,7 +84,7 @@ __pthread_once (once_control, init_routine)
       atomic_increment (once_control);
 
       /* Wake up all other threads.  */
-      lll_futex_wake (once_control, INT_MAX);
+      lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
 
       break;
     }
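For context, the guarantee pthread_once.c implements with the futex waits and wakes patched above: the init routine runs exactly once, and racing callers sleep until the generation counter advances. A minimal usage sketch (plain POSIX API, not patch code; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_once_t once = PTHREAD_ONCE_INIT;

    static void init (void)
    {
      puts ("init runs exactly once");
    }

    int main (void)
    {
      pthread_once (&once, init);
      pthread_once (&once, init);   /* no-op: generation already advanced */
      return 0;
    }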
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
index 5fee89235..1cf625f4e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
@@ -24,28 +24,38 @@
 
 #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
 
-#ifdef __PIC__
+# ifdef __PIC__
+#  define PSEUDO_CPLOAD .cpload t9;
+#  define PSEUDO_ERRJMP la t9, __syscall_error; jr t9;
+#  define PSEUDO_SAVEGP sw gp, 32(sp); cfi_rel_offset (gp, 32);
+#  define PSEUDO_LOADGP lw gp, 32(sp);
+# else
+#  define PSEUDO_CPLOAD
+#  define PSEUDO_ERRJMP j __syscall_error;
+#  define PSEUDO_SAVEGP
+#  define PSEUDO_LOADGP
+# endif
+
 # undef PSEUDO
 # define PSEUDO(name, syscall_name, args)				      \
       .align 2;								      \
   L(pseudo_start):							      \
       cfi_startproc;							      \
-  99: la t9,__syscall_error;						      \
-      jr t9;								      \
+  99: PSEUDO_ERRJMP							      \
   .type __##syscall_name##_nocancel, @function;				      \
   .globl __##syscall_name##_nocancel;					      \
   __##syscall_name##_nocancel:						      \
     .set noreorder;							      \
-    .cpload t9;								      \
+    PSEUDO_CPLOAD							      \
     li v0, SYS_ify(syscall_name);					      \
     syscall;								      \
     .set reorder;							      \
-    bne a3, zero, SYSCALL_ERROR_LABEL;			       		      \
+    bne a3, zero, 99b;					       		      \
     ret;								      \
   .size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;	      \
   ENTRY (name)								      \
     .set noreorder;							      \
-    .cpload t9;								      \
+    PSEUDO_CPLOAD							      \
     .set reorder;							      \
     SINGLE_THREAD_P(v1);						      \
     bne zero, v1, L(pseudo_cancel);					      \
@@ -53,17 +63,16 @@
     li v0, SYS_ify(syscall_name);					      \
     syscall;								      \
     .set reorder;							      \
-    bne a3, zero, SYSCALL_ERROR_LABEL;			       		      \
+    bne a3, zero, 99b;					       		      \
     ret;								      \
   L(pseudo_cancel):							      \
     SAVESTK_##args;						              \
     sw ra, 28(sp);							      \
     cfi_rel_offset (ra, 28);						      \
-    sw gp, 32(sp);							      \
-    cfi_rel_offset (gp, 32);						      \
+    PSEUDO_SAVEGP							      \
     PUSHARGS_##args;			/* save syscall args */	      	      \
     CENABLE;								      \
-    lw gp, 32(sp);							      \
+    PSEUDO_LOADGP							      \
     sw v0, 44(sp);			/* save mask */			      \
     POPARGS_##args;			/* restore syscall args */	      \
     .set noreorder;							      \
@@ -74,12 +83,12 @@
     sw a3, 40(sp);			/* save syscall error flag */	      \
     lw a0, 44(sp);			/* pass mask as arg1 */		      \
     CDISABLE;								      \
-    lw gp, 32(sp);							      \
+    PSEUDO_LOADGP							      \
     lw v0, 36(sp);			/* restore syscall result */          \
     lw a3, 40(sp);			/* restore syscall error flag */      \
     lw ra, 28(sp);			/* restore return address */	      \
     .set noreorder;							      \
-    bne a3, zero, SYSCALL_ERROR_LABEL;					      \
+    bne a3, zero, 99b;							      \
     RESTORESTK;						              \
   L(pseudo_end):							      \
     .set reorder;
@@ -87,8 +96,6 @@
 # undef PSEUDO_END
 # define PSEUDO_END(sym) cfi_endproc; .end sym; .size sym,.-sym
 
-#endif
-
 # define PUSHARGS_0	/* nothing to do */
 # define PUSHARGS_1	PUSHARGS_0 sw a0, 0(sp); cfi_rel_offset (a0, 0);
 # define PUSHARGS_2	PUSHARGS_1 sw a1, 4(sp); cfi_rel_offset (a1, 4);
@@ -135,19 +142,25 @@
 
 # define RESTORESTK 	addu sp, STKSPACE; cfi_adjust_cfa_offset(-STKSPACE)
 
+# ifdef __PIC__
 /* We use jalr rather than jal.  This means that the assembler will not
    automatically restore $gp (in case libc has multiple GOTs) so we must
    do it manually - which we have to do anyway since we don't use .cprestore.
    It also shuts up the assembler warning about not using .cprestore.  */
+#  define PSEUDO_JMP(sym) la t9, sym; jalr t9;
+# else
+#  define PSEUDO_JMP(sym) jal sym;
+# endif
+
 # ifdef IS_IN_libpthread
-#  define CENABLE	la t9, __pthread_enable_asynccancel; jalr t9;
-#  define CDISABLE	la t9, __pthread_disable_asynccancel; jalr t9;
+#  define CENABLE	PSEUDO_JMP (__pthread_enable_asynccancel)
+#  define CDISABLE	PSEUDO_JMP (__pthread_disable_asynccancel)
 # elif defined IS_IN_librt
-#  define CENABLE	la t9, __librt_enable_asynccancel; jalr t9;
-#  define CDISABLE	la t9, __librt_disable_asynccancel; jalr t9;
+#  define CENABLE	PSEUDO_JMP (__librt_enable_asynccancel)
+#  define CDISABLE	PSEUDO_JMP (__librt_disable_asynccancel)
 # else
-#  define CENABLE	la t9, __libc_enable_asynccancel; jalr t9;
-#  define CDISABLE	la t9, __libc_disable_asynccancel; jalr t9;
+#  define CENABLE	PSEUDO_JMP (__libc_enable_asynccancel)
+#  define CDISABLE	PSEUDO_JMP (__libc_disable_asynccancel)
 # endif
 
 # ifndef __ASSEMBLER__
@@ -167,3 +180,9 @@
 # define NO_CANCELLATION 1
 
 #endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+				   header.multiple_threads) == 0, 1)
+#endif
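The RTLD_SINGLE_THREAD_P macro added at the end mirrors what SINGLE_THREAD_P does inside the PSEUDO wrapper: predict the single-threaded case and skip the CENABLE/CDISABLE cancellation bookkeeping entirely. A stand-alone sketch of the pattern; multiple_threads here is a hypothetical stand-in for the TCB field that THREAD_GETMEM actually reads:

    #include <stdio.h>

    static int multiple_threads;   /* hypothetical stand-in for the TCB field */

    /* Same shape as the header's macro, minus the real TCB access. */
    #define SINGLE_THREAD_P \
      __builtin_expect (multiple_threads == 0, 1)

    int main (void)
    {
      if (SINGLE_THREAD_P)
        puts ("fast path: plain syscall, no cancellation setup");
      else
        puts ("slow path: CENABLE ... syscall ... CDISABLE");
      return 0;
    }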
