Diffstat (limited to 'libpthread/nptl/sysdeps/unix/sysv/linux/arm')
10 files changed, 424 insertions, 471 deletions
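The atomic.h hunk below drops the old SWP/Thumb-1 exchange code and instead branches to the ARM Linux "kuser helpers" at fixed kernel-provided addresses (0xffff0fa0 for the memory barrier, 0xffff0fc0 for compare-and-exchange). As a minimal sketch, not part of the patch, this is roughly how those helpers can be called from C; the typedefs and the `try_lock` wrapper are illustrative names, only the addresses and the return convention (zero on a successful exchange) come from the kernel ABI the patch relies on:

```c
/* Sketch only: calling the ARM Linux kuser helpers directly.  The
   addresses are fixed by the kernel user-helper ABI and match the ones
   the new bits/atomic.h code branches to.  */
#include <stdint.h>

typedef int  (*kuser_cmpxchg_t) (int32_t oldval, int32_t newval,
                                 volatile int32_t *ptr);
typedef void (*kuser_dmb_t) (void);

#define __kuser_cmpxchg        ((kuser_cmpxchg_t) 0xffff0fc0)
#define __kuser_memory_barrier ((kuser_dmb_t) 0xffff0fa0)

static int
try_lock (volatile int32_t *futex)
{
  /* Returns 0 if *futex was 0 and has been atomically set to 1.  */
  return __kuser_cmpxchg (0, 1, futex);
}
```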
| diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h index 49a935a52..b0586ea1e 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h @@ -37,61 +37,21 @@ typedef uintmax_t uatomic_max_t;  void __arm_link_error (void); -#ifdef __thumb__ - -/* Note that to allow efficient implementation the arguemtns are reversed -   relative to atomic_exchange_acq.  */ -int __thumb_swpb (int newvalue, void *mem) -  attribute_hidden; -unsigned int __thumb_swp (unsigned int newvalue, void *mem) -  attribute_hidden; -unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *mem) -  attribute_hidden; - -#define atomic_exchange_acq(mem, newvalue)				      \ -  ({ __typeof (*mem) result;						      \ -     if (sizeof (*mem) == 1)						      \ -       result = __thumb_swpb (newvalue, mem);				      \ -     else if (sizeof (*mem) == 4)					      \ -       result = __thumb_swp (newvalue, mem);				      \ -     else								      \ -       {								      \ -	 result = 0;							      \ -	 abort ();							      \ -       }								      \ -     result; }) - -#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ -  ({ __arm_link_error (); oldval; }) - -#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ -  ({ __arm_link_error (); oldval; }) - -#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ -  ((__typeof (oldval)) __thumb_cmpxchg (oldval, newval, mem)) - -#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ -  ({ __arm_link_error (); oldval; }) - +#ifdef __thumb2__ +#define atomic_full_barrier() \ +     __asm__ __volatile__						      \ +	     ("movw\tip, #0x0fa0\n\t"					      \ +	      "movt\tip, #0xffff\n\t"					      \ +	      "blx\tip"							      \ +	      : : : "ip", "lr", "cc", "memory");  #else -/* ARM mode.  */ - -#define atomic_exchange_acq(mem, newvalue)				      \ -  ({ __typeof (*mem) _xchg_result;					      \ -     if (sizeof (*mem) == 1)						      \ -       __asm__ __volatile__ ("swpb %0, %1, [%2]"			      \ -			     : "=&r,&r" (_xchg_result)			      \ -			     : "r,0" (newvalue), "r,r" (mem) : "memory");     \ -     else if (sizeof (*mem) == 4)					      \ -       __asm__ __volatile__ ("swp %0, %1, [%2]"				      \ -			     : "=&r,&r" (_xchg_result)			      \ -			     : "r,0" (newvalue), "r,r" (mem) : "memory");     \ -     else								      \ -       {								      \ -	 _xchg_result = 0;						      \ -	 abort ();							      \ -       }								      \ -     _xchg_result; }) +#define atomic_full_barrier() \ +     __asm__ __volatile__						      \ +	     ("mov\tip, #0xffff0fff\n\t"				      \ +	      "mov\tlr, pc\n\t"						      \ +	      "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)"			      \ +	      : : : "ip", "lr", "cc", "memory"); +#endif  /* Atomic compare and exchange.  This sequence relies on the kernel to     provide a compare and exchange operation which is atomic on the @@ -108,6 +68,9 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me     specify one to work around GCC PR rtl-optimization/21223.  Otherwise     it may cause a_oldval or a_tmp to be moved to a different register.  */ +#ifdef __thumb2__ +/* Thumb-2 has ldrex/strex.  However it does not have barrier instructions, +   so we still need to use the kernel helper.  
*/  #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \    ({ register __typeof (oldval) a_oldval asm ("r0");			      \       register __typeof (oldval) a_newval asm ("r1") = (newval);		      \ @@ -115,22 +78,45 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me       register __typeof (oldval) a_tmp asm ("r3");			      \       register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);	      \       __asm__ __volatile__						      \ -	     ("0:\tldr\t%1,[%3]\n\t"					      \ -	      "cmp\t%1, %4\n\t"						      \ +	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \ +	      "cmp\t%[tmp], %[old2]\n\t"				      \  	      "bne\t1f\n\t"						      \ -	      "mov\t%0, %4\n\t"						      \ -	      "mov\t%1, #0xffff0fff\n\t"				      \ +	      "mov\t%[old], %[old2]\n\t"				      \ +	      "movw\t%[tmp], #0x0fc0\n\t"				      \ +	      "movt\t%[tmp], #0xffff\n\t"				      \ +	      "blx\t%[tmp]\n\t"						      \ +	      "bcc\t0b\n\t"						      \ +	      "mov\t%[tmp], %[old2]\n\t"				      \ +	      "1:"							      \ +	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \ +	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \ +		[old2] "r" (a_oldval2)					      \ +	      : "ip", "lr", "cc", "memory");				      \ +     a_tmp; }) +#else +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ +  ({ register __typeof (oldval) a_oldval asm ("r0");			      \ +     register __typeof (oldval) a_newval asm ("r1") = (newval);		      \ +     register __typeof (mem) a_ptr asm ("r2") = (mem);			      \ +     register __typeof (oldval) a_tmp asm ("r3");			      \ +     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);	      \ +     __asm__ __volatile__						      \ +	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \ +	      "cmp\t%[tmp], %[old2]\n\t"				      \ +	      "bne\t1f\n\t"						      \ +	      "mov\t%[old], %[old2]\n\t"				      \ +	      "mov\t%[tmp], #0xffff0fff\n\t"				      \  	      "mov\tlr, pc\n\t"						      \ -	      "add\tpc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t"		      \ +	      "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t"		      \  	      "bcc\t0b\n\t"						      \ -	      "mov\t%1, %4\n\t"						      \ +	      "mov\t%[tmp], %[old2]\n\t"				      \  	      "1:"							      \ -	      : "=&r" (a_oldval), "=&r" (a_tmp)				      \ -	      : "r" (a_newval), "r" (a_ptr), "r" (a_oldval2)		      \ +	      : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp)		      \ +	      : [new] "r" (a_newval), [ptr] "r" (a_ptr),		      \ +		[old2] "r" (a_oldval2)					      \  	      : "ip", "lr", "cc", "memory");				      \       a_tmp; }) +#endif  #define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \    ({ __arm_link_error (); oldval; }) - -#endif /* __thumb__ */ diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h index ea8d6a2f0..e1b115c8c 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h @@ -19,6 +19,8 @@  #ifndef _BITS_PTHREADTYPES_H  #define _BITS_PTHREADTYPES_H	1 +#include <endian.h> +  #define __SIZEOF_PTHREAD_ATTR_T 36  #define __SIZEOF_PTHREAD_MUTEX_T 24  #define __SIZEOF_PTHREAD_MUTEXATTR_T 4 @@ -126,9 +128,21 @@ typedef union      unsigned int __writer_wakeup;      unsigned int __nr_readers_queued;      unsigned int __nr_writers_queued; +#if __BYTE_ORDER == __BIG_ENDIAN +    unsigned char __pad1; +    unsigned char 
__pad2; +    unsigned char __shared; +    /* FLAGS must stay at this position in the structure to maintain +       binary compatibility.  */ +    unsigned char __flags; +#else      /* FLAGS must stay at this position in the structure to maintain         binary compatibility.  */ -    unsigned int __flags; +    unsigned char __flags; +    unsigned char __shared; +    unsigned char __pad1; +    unsigned char __pad2; +#endif      int __writer;    } __data;    char __size[__SIZEOF_PTHREAD_RWLOCK_T]; diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h index 3fc647d31..dadfac2af 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2002, 2005 Free Software Foundation, Inc. +/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.     This file is part of the GNU C Library.     The GNU C Library is free software; you can redistribute it and/or @@ -27,9 +27,6 @@  /* Value returned if `sem_open' failed.  */  #define SEM_FAILED      ((sem_t *) 0) -/* Maximum value the semaphore can have.  */ -#define SEM_VALUE_MAX   ((int) ((~0u) >> 1)) -  typedef union  { diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c index 74be18855..60ccf7700 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c @@ -1,5 +1,5 @@  /* low level locking for pthread library.  Generic futex-using version. -   Copyright (C) 2003, 2005 Free Software Foundation, Inc. +   Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.     This file is part of the GNU C Library.     The GNU C Library is free software; you can redistribute it and/or @@ -22,8 +22,36 @@  #include <lowlevellock.h>  #include <sys/time.h> +void +__lll_lock_wait_private (int *futex) +{ +  do +    { +      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); +      if (oldval != 0) +	lll_futex_wait (futex, 2, LLL_PRIVATE); +    } +  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); +} + + +/* These functions don't get included in libc.so  */ +#ifdef IS_IN_libpthread +void +__lll_lock_wait (int *futex, int private) +{ +  do +    { +      int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1); +      if (oldval != 0) +	lll_futex_wait (futex, 2, private); +    } +  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0); +} + +  int -__lll_timedlock_wait (int *futex, const struct timespec *abstime) +__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)  {    struct timespec rt; @@ -55,23 +83,10 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)        if (rt.tv_sec < 0)  	return ETIMEDOUT; -      lll_futex_timed_wait (futex, 2, &rt); +      // XYZ: Lost the lock to check whether it was private. 
+      lll_futex_timed_wait (futex, 2, &rt, private);      } -  while (atomic_exchange_acq (futex, 2) != 0); - -  return 0; -} - - -/* These don't get included in libc.so  */ -#ifdef IS_IN_libpthread -int -lll_unlock_wake_cb (int *futex) -{ -  int val = atomic_exchange_rel (futex, 0); - -  if (__builtin_expect (val > 1, 0)) -    lll_futex_wake (futex, 1); +  while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);    return 0;  } @@ -108,11 +123,11 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)  	return ETIMEDOUT;        /* Wait until thread terminates.  */ -      if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT) +      // XYZ: Lost the lock to check whether it was private. +      if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)  	return ETIMEDOUT;      }    return 0;  } -  #endif diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h index 79f3ddeca..4c7d08c92 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2005, 2006 Free Software Foundation, Inc. +/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.     This file is part of the GNU C Library.     The GNU C Library is free software; you can redistribute it and/or @@ -12,7 +12,7 @@     Lesser General Public License for more details.     You should have received a copy of the GNU Lesser General Public -   License along with the GNU C Libr	\ary; if not, write to the Free +   License along with the GNU C Library; if not, write to the Free     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA     02111-1307 USA.  */ @@ -24,6 +24,7 @@  #include <bits/pthreadtypes.h>  #include <atomic.h>  #include <sysdep.h> +#include <bits/kernel-features.h>  #define FUTEX_WAIT		0  #define FUTEX_WAKE		1 @@ -31,267 +32,231 @@  #define FUTEX_CMP_REQUEUE	4  #define FUTEX_WAKE_OP		5  #define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE	((4 << 24) | 1) +#define FUTEX_LOCK_PI		6 +#define FUTEX_UNLOCK_PI		7 +#define FUTEX_TRYLOCK_PI	8 +#define FUTEX_WAIT_BITSET	9 +#define FUTEX_WAKE_BITSET	10 +#define FUTEX_PRIVATE_FLAG	128 +#define FUTEX_CLOCK_REALTIME	256 + +#define FUTEX_BITSET_MATCH_ANY	0xffffffff + +/* Values for 'private' parameter of locking macros.  Yes, the +   definition seems to be backwards.  But it is not.  The bit will be +   reversed before passing to the system call.  */ +#define LLL_PRIVATE	0 +#define LLL_SHARED	FUTEX_PRIVATE_FLAG + + +#if !defined NOT_IN_libc || defined IS_IN_rtld +/* In libc.so or ld.so all futexes are private.  */ +# ifdef __ASSUME_PRIVATE_FUTEX +#  define __lll_private_flag(fl, private) \ +  ((fl) | FUTEX_PRIVATE_FLAG) +# else +#  define __lll_private_flag(fl, private) \ +  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) +# endif +#else +# ifdef __ASSUME_PRIVATE_FUTEX +#  define __lll_private_flag(fl, private) \ +  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private)) +# else +#  define __lll_private_flag(fl, private) \ +  (__builtin_constant_p (private)					      \ +   ? ((private) == 0							      \ +      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))	      \ +      : (fl))								      \ +   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)				      \ +	      & THREAD_GETMEM (THREAD_SELF, header.private_futex)))) +# endif	       +#endif -/* Initializer for compatibility lock.	
*/ -#define LLL_MUTEX_LOCK_INITIALIZER (0) -#define lll_futex_wait(futexp, val) \ -  ({									      \ -    INTERNAL_SYSCALL_DECL (__err);					      \ -    long int __ret;							      \ -    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \ -			      (futexp), FUTEX_WAIT, (val), 0);		      \ -    __ret;								      \ -  }) +#define lll_futex_wait(futexp, val, private) \ +  lll_futex_timed_wait(futexp, val, NULL, private) -#define lll_futex_timed_wait(futexp, val, timespec) \ +#define lll_futex_timed_wait(futexp, val, timespec, private) \    ({									      \      INTERNAL_SYSCALL_DECL (__err);					      \      long int __ret;							      \ -    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \ -			      (futexp), FUTEX_WAIT, (val), (timespec));	      \ +    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),		      \ +			      __lll_private_flag (FUTEX_WAIT, private),	      \ +			      (val), (timespec));			      \      __ret;								      \    }) -#define lll_futex_wake(futexp, nr) \ +#define lll_futex_wake(futexp, nr, private) \    ({									      \      INTERNAL_SYSCALL_DECL (__err);					      \      long int __ret;							      \ -    __ret = INTERNAL_SYSCALL (futex, __err, 4,				      \ -			      (futexp), FUTEX_WAKE, (nr), 0);		      \ +    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp),		      \ +			      __lll_private_flag (FUTEX_WAKE, private),	      \ +			      (nr), 0);					      \      __ret;								      \    }) -#define lll_robust_mutex_dead(futexv) \ +#define lll_robust_dead(futexv, private) \    do									      \      {									      \        int *__futexp = &(futexv);					      \        atomic_or (__futexp, FUTEX_OWNER_DIED);				      \ -      lll_futex_wake (__futexp, 1);					      \ +      lll_futex_wake (__futexp, 1, private);				      \      }									      \    while (0)  /* Returns non-zero if error happened, zero if success.  */ -#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \ +#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \    ({									      \      INTERNAL_SYSCALL_DECL (__err);					      \      long int __ret;							      \ -    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \ -			      (futexp), FUTEX_CMP_REQUEUE, (nr_wake),	      \ -			      (nr_move), (mutex), (val));		      \ -    __ret;								      \ +    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \ +			      __lll_private_flag (FUTEX_CMP_REQUEUE, private),\ +			      (nr_wake), (nr_move), (mutex), (val));	      \ +    INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \    })  /* Returns non-zero if error happened, zero if success.  
*/ -#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \ +#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \    ({									      \      INTERNAL_SYSCALL_DECL (__err);					      \      long int __ret;							      \ -    __ret = INTERNAL_SYSCALL (futex, __err, 6,				      \ -			      (futexp), FUTEX_WAKE_OP, (nr_wake),	      \ -			      (nr_wake2), (futexp2),			      \ +    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),		      \ +			      __lll_private_flag (FUTEX_WAKE_OP, private),    \ +			      (nr_wake), (nr_wake2), (futexp2),		      \  			      FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);		      \ -    __ret;								      \ +    INTERNAL_SYSCALL_ERROR_P (__ret, __err);				      \    }) -static inline int __attribute__((always_inline)) -__lll_mutex_trylock (int *futex) -{ -  int flag = 1, old; -#ifdef __thumb__ -  old = atomic_exchange_acq (futex, flag); -  if (old < 1) -    flag = 0; -  else if (old > 1) -    flag = atomic_exchange_acq (futex, old); -#else -  __asm__ __volatile__ ( -    "\tswp	%[old], %[flag], [%[futex]]	@ try to take the lock\n" -    "\tcmp	%[old], #1			@ check old lock value\n" -    "\tmovlo	%[flag], #0			@ if we got it, return 0\n" -    "\tswphi	%[flag], %[old], [%[futex]]	@ if it was contested,\n" -    "						@ restore the contested flag,\n" -    "						@ and check whether that won." -    : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old) -    : : "memory" ); -#endif +#define lll_trylock(lock)	\ +  atomic_compare_and_exchange_val_acq(&(lock), 1, 0) + +#define lll_cond_trylock(lock)	\ +  atomic_compare_and_exchange_val_acq(&(lock), 2, 0) + +#define __lll_robust_trylock(futex, id) \ +  (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0) +#define lll_robust_trylock(lock, id) \ +  __lll_robust_trylock (&(lock), id) + +extern void __lll_lock_wait_private (int *futex) attribute_hidden; +extern void __lll_lock_wait (int *futex, int private) attribute_hidden; +extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden; + +#define __lll_lock(futex, private)					      \ +  ((void) ({								      \ +    int *__futex = (futex);						      \ +    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex,       \ +								1, 0), 0))    \ +      {									      \ +	if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \ +	  __lll_lock_wait_private (__futex);				      \ +	else								      \ +	  __lll_lock_wait (__futex, private);				      \ +      }									      \ +  })) +#define lll_lock(futex, private) __lll_lock (&(futex), private) + + +#define __lll_robust_lock(futex, id, private)				      \ +  ({									      \ +    int *__futex = (futex);						      \ +    int __val = 0;							      \ +									      \ +    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \ +								0), 0))	      \ +      __val = __lll_robust_lock_wait (__futex, private);		      \ +    __val;								      \ +  }) +#define lll_robust_lock(futex, id, private) \ +  __lll_robust_lock (&(futex), id, private) + + +#define __lll_cond_lock(futex, private)					      \ +  ((void) ({								      \ +    int *__futex = (futex);						      \ +    if (__builtin_expect (atomic_exchange_acq (__futex, 2), 0))		      \ +      __lll_lock_wait (__futex, private);				      \ +  })) +#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private) + + +#define lll_robust_cond_lock(futex, id, private) \ +  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private) -  return flag; -} -#define 
lll_mutex_trylock(lock)	__lll_mutex_trylock (&(lock)) - - -static inline int __attribute__((always_inline)) -__lll_mutex_cond_trylock (int *futex) -{ -  int flag = 2, old; -#ifdef __thumb__ -  old = atomic_exchange_acq (futex, flag); -  if (old < 1) -    flag = 0; -  else if (old > 1) -    flag = atomic_exchange_acq (futex, old); -#else -  __asm__ __volatile__ ( -    "\tswp	%[old], %[flag], [%[futex]]	@ try to take the lock\n" -    "\tcmp	%[old], #1			@ check old lock value\n" -    "\tmovlo	%[flag], #0			@ if we got it, return 0\n" -    "\tswphi	%[flag], %[old], [%[futex]]	@ if it was contested,\n" -    "						@ restore the contested flag,\n" -    "						@ and check whether that won." -    : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old) -    : : "memory" ); -#endif -  return flag; -} -#define lll_mutex_cond_trylock(lock)	__lll_mutex_cond_trylock (&(lock)) - - -static inline int __attribute__((always_inline)) -__lll_robust_mutex_trylock(int *futex, int id) -{ -  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0; -} -#define lll_robust_mutex_trylock(lock, id) \ -  __lll_robust_mutex_trylock (&(lock), id) - -extern int __lll_robust_lock_wait (int *futex) attribute_hidden; - -static inline void __attribute__((always_inline)) -__lll_mutex_lock (int *futex) -{ -  int val = atomic_exchange_acq (futex, 1); - -  if (__builtin_expect (val != 0, 0)) -    { -      while (atomic_exchange_acq (futex, 2) != 0) -	lll_futex_wait (futex, 2); -    } -} -#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex)) - - -static inline int __attribute__ ((always_inline)) -__lll_robust_mutex_lock (int *futex, int id) -{ -  int result = 0; -  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) -    result = __lll_robust_lock_wait (futex); -  return result; -} -#define lll_robust_mutex_lock(futex, id) \ -  __lll_robust_mutex_lock (&(futex), id) - - -static inline void __attribute__ ((always_inline)) -__lll_mutex_cond_lock (int *futex) -{ -  int val = atomic_exchange_acq (futex, 2); - -  if (__builtin_expect (val != 0, 0)) -    { -      while (atomic_exchange_acq (futex, 2) != 0) -	lll_futex_wait (futex, 2); -    } -} -#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex)) - - -#define lll_robust_mutex_cond_lock(futex, id) \ -  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS) - - -extern int __lll_timedlock_wait (int *futex, const struct timespec *) -	attribute_hidden; -extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) -	attribute_hidden; - -static inline int __attribute__ ((always_inline)) -__lll_mutex_timedlock (int *futex, const struct timespec *abstime) -{ -  int result = 0; -  int val = atomic_exchange_acq (futex, 1); - -  if (__builtin_expect (val != 0, 0)) -    result = __lll_timedlock_wait (futex, abstime); -  return result; -} -#define lll_mutex_timedlock(futex, abstime) \ -  __lll_mutex_timedlock (&(futex), abstime) - - -static inline int __attribute__ ((always_inline)) -__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime, -			      int id) -{ -  int result = 0; -  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0) -    result = __lll_robust_timedlock_wait (futex, abstime); -  return result; -} -#define lll_robust_mutex_timedlock(futex, abstime, id) \ -  __lll_robust_mutex_timedlock (&(futex), abstime, id) - - -static inline void __attribute__ ((always_inline)) -__lll_mutex_unlock (int *futex) -{ -  int val = atomic_exchange_rel (futex, 0); -  if (__builtin_expect (val > 1, 0)) -    
lll_futex_wake (futex, 1); -} -#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex)) - - -static inline void __attribute__ ((always_inline)) -__lll_robust_mutex_unlock (int *futex, int mask) -{ -  int val = atomic_exchange_rel (futex, 0); -  if (__builtin_expect (val & mask, 0)) -    lll_futex_wake (futex, 1); -} -#define lll_robust_mutex_unlock(futex) \ -  __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS) - - -static inline void __attribute__ ((always_inline)) -__lll_mutex_unlock_force (int *futex) -{ -  (void) atomic_exchange_rel (futex, 0); -  lll_futex_wake (futex, 1); -} -#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex)) - - -#define lll_mutex_islocked(futex) \ +extern int __lll_timedlock_wait (int *futex, const struct timespec *, +				 int private) attribute_hidden; +extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *, +					int private) attribute_hidden; + +#define __lll_timedlock(futex, abstime, private)			      \ +  ({									      \ +     int *__futex = (futex);						      \ +     int __val = 0;							      \ +									      \ +     if (__builtin_expect (atomic_exchange_acq (__futex, 1), 0))	      \ +       __val = __lll_timedlock_wait (__futex, abstime, private);	      \ +     __val;								      \ +  }) +#define lll_timedlock(futex, abstime, private) \ +  __lll_timedlock (&(futex), abstime, private) + + +#define __lll_robust_timedlock(futex, abstime, id, private)		      \ +  ({									      \ +    int *__futex = (futex);						      \ +    int __val = 0;							      \ +									      \ +    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id,  \ +								0), 0))	      \ +      __val = __lll_robust_timedlock_wait (__futex, abstime, private);	      \ +    __val;								      \ +  }) +#define lll_robust_timedlock(futex, abstime, id, private) \ +  __lll_robust_timedlock (&(futex), abstime, id, private) + + +#define __lll_unlock(futex, private) \ +  (void)							\ +    ({ int *__futex = (futex);					\ +       int __oldval = atomic_exchange_rel (__futex, 0);		\ +       if (__builtin_expect (__oldval > 1, 0))			\ +	 lll_futex_wake (__futex, 1, private);			\ +    }) +#define lll_unlock(futex, private) __lll_unlock(&(futex), private) + + +#define __lll_robust_unlock(futex, private) \ +  (void)							\ +    ({ int *__futex = (futex);					\ +       int __oldval = atomic_exchange_rel (__futex, 0);		\ +       if (__builtin_expect (__oldval & FUTEX_WAITERS, 0))	\ +	 lll_futex_wake (__futex, 1, private);			\ +    }) +#define lll_robust_unlock(futex, private) \ +  __lll_robust_unlock(&(futex), private) + + +#define lll_islocked(futex) \    (futex != 0)  /* Our internal lock implementation is identical to the binary-compatible     mutex implementation. */ -/* Type for lock object.  */ -typedef int lll_lock_t; -  /* Initializers for lock.  */  #define LLL_LOCK_INITIALIZER		(0)  #define LLL_LOCK_INITIALIZER_LOCKED	(1) -extern int lll_unlock_wake_cb (int *__futex) attribute_hidden; -  /* The states of a lock are:      0  -  untaken      1  -  taken by one user     >1  -  taken by more users */ -#define lll_trylock(lock)	lll_mutex_trylock (lock) -#define lll_lock(lock)		lll_mutex_lock (lock) -#define lll_unlock(lock)	lll_mutex_unlock (lock) -#define lll_islocked(lock)	lll_mutex_islocked (lock) -  /* The kernel notifies a process which uses CLONE_CLEARTID via futex     wakeup when the clone terminates.  
The memory location contains the     thread ID while the clone is running and is reset to zero @@ -300,7 +265,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;    do {					\      __typeof (tid) __tid;		\      while ((__tid = (tid)) != 0)	\ -      lll_futex_wait (&(tid), __tid);	\ +      lll_futex_wait (&(tid), __tid, LLL_SHARED);\    } while (0)  extern int __lll_timedwait_tid (int *, const struct timespec *) @@ -314,26 +279,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)      __res;						\    }) - -/* Conditional variable handling.  */ - -extern void __lll_cond_wait (pthread_cond_t *cond) -     attribute_hidden; -extern int __lll_cond_timedwait (pthread_cond_t *cond, -				 const struct timespec *abstime) -     attribute_hidden; -extern void __lll_cond_wake (pthread_cond_t *cond) -     attribute_hidden; -extern void __lll_cond_broadcast (pthread_cond_t *cond) -     attribute_hidden; - -#define lll_cond_wait(cond) \ -  __lll_cond_wait (cond) -#define lll_cond_timedwait(cond, abstime) \ -  __lll_cond_timedwait (cond, abstime) -#define lll_cond_wake(cond) \ -  __lll_cond_wake (cond) -#define lll_cond_broadcast(cond) \ -  __lll_cond_broadcast (cond) -  #endif	/* lowlevellock.h */ diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c index c8925810c..d81ecd4e5 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c @@ -27,7 +27,7 @@ clear_once_control (void *arg)    pthread_once_t *once_control = (pthread_once_t *) arg;    *once_control = 0; -  lll_futex_wake (once_control, INT_MAX); +  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);  }  int @@ -66,7 +66,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))  	break;        /* Same generation, some other thread was faster. Wait.  */ -      lll_futex_wait (once_control, oldval); +      lll_futex_wait (once_control, oldval, LLL_PRIVATE);      }    /* This thread is the first here.  Do the initialization. @@ -82,7 +82,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))    *once_control = __fork_generation | 2;    /* Wake up all other threads.  */ -  lll_futex_wake (once_control, INT_MAX); +  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);    return 0;  } diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h index 350d9af50..95d532802 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h @@ -24,11 +24,6 @@  #if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt -/* NOTE: We do mark syscalls with unwind annotations, for the benefit of -   cancellation; but they're really only accurate at the point of the -   syscall.  The ARM unwind directives are not rich enough without adding -   a custom personality function.  */ -  # undef PSEUDO  # define PSEUDO(name, syscall_name, args)				\    .section ".text";							\ @@ -48,55 +43,38 @@      cmn r0, $4096;							\      PSEUDO_RET;								\    .Lpseudo_cancel:							\ -    .fnstart;								\      DOCARGS_##args;	/* save syscall args etc. around CENABLE.  */	\      CENABLE;								\      mov ip, r0;		/* put mask in safe place.  */			\      UNDOCARGS_##args;	/* restore syscall args.  */			\ -    ldr r7, =SYS_ify (syscall_name);					\ -    swi 0x0;		/* do the call.  
*/				\ -    .fnend;		/* Past here we can't easily unwind.  */	\ -    mov r7, r0;		/* save syscall return value.  */		\ +    swi SYS_ify (syscall_name);	/* do the call.  */			\ +    str r0, [sp, $-4]!; /* save syscall return value.  */		\      mov r0, ip;		/* get mask back.  */				\      CDISABLE;								\ -    mov r0, r7;		/* retrieve return value.  */			\ -    RESTORE_LR_##args;							\ +    ldmfd sp!, {r0, lr}; /* retrieve return value and address.  */	\      UNDOARGS_##args;							\      cmn r0, $4096; -/* DOARGS pushes four bytes on the stack for five arguments, eight bytes for -   six arguments, and nothing for fewer.  In order to preserve doubleword -   alignment, sometimes we must save an extra register.  */ - -# define RESTART_UNWIND .fnend; .fnstart; .save {r7, lr} - -# define DOCARGS_0	stmfd sp!, {r7, lr}; .save {r7, lr} +# define DOCARGS_0	str lr, [sp, #-4]!;  # define UNDOCARGS_0 -# define RESTORE_LR_0	ldmfd sp!, {r7, lr}; -# define DOCARGS_1	stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8 -# define UNDOCARGS_1	ldr r0, [sp], #8; RESTART_UNWIND -# define RESTORE_LR_1	RESTORE_LR_0 +# define DOCARGS_1	stmfd sp!, {r0, lr}; +# define UNDOCARGS_1	ldr r0, [sp], #4; -# define DOCARGS_2	stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8 -# define UNDOCARGS_2	ldmfd sp!, {r0, r1}; RESTART_UNWIND -# define RESTORE_LR_2	RESTORE_LR_0 +# define DOCARGS_2	stmfd sp!, {r0, r1, lr}; +# define UNDOCARGS_2	ldmfd sp!, {r0, r1}; -# define DOCARGS_3	stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16 -# define UNDOCARGS_3	ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND -# define RESTORE_LR_3	RESTORE_LR_0 +# define DOCARGS_3	stmfd sp!, {r0, r1, r2, lr}; +# define UNDOCARGS_3	ldmfd sp!, {r0, r1, r2}; -# define DOCARGS_4	stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16 -# define UNDOCARGS_4	ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND -# define RESTORE_LR_4	RESTORE_LR_0 +# define DOCARGS_4	stmfd sp!, {r0, r1, r2, r3, lr}; +# define UNDOCARGS_4	ldmfd sp!, {r0, r1, r2, r3}; -# define DOCARGS_5	.save {r4}; stmfd sp!, {r0, r1, r2, r3, r4, r7, lr}; .save {r7, lr}; .pad #20 -# define UNDOCARGS_5	ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4}; .save {r7, lr}; .pad #4 -# define RESTORE_LR_5	ldmfd sp!, {r4, r7, lr} +# define DOCARGS_5	DOCARGS_4 +# define UNDOCARGS_5	UNDOCARGS_4 -# define DOCARGS_6	.save {r4, r5}; stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #20 -# define UNDOCARGS_6	ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4, r5}; .save {r7, lr} -# define RESTORE_LR_6	RESTORE_LR_0 +# define DOCARGS_6	DOCARGS_5 +# define UNDOCARGS_6	UNDOCARGS_5  # ifdef IS_IN_libpthread  #  define CENABLE	bl PLTJMP(__pthread_enable_asynccancel) @@ -151,3 +129,9 @@ extern int __local_multiple_threads attribute_hidden;  # define NO_CANCELLATION 1  #endif + +#ifndef __ASSEMBLER__ +# define RTLD_SINGLE_THREAD_P \ +  __builtin_expect (THREAD_GETMEM (THREAD_SELF, \ +				   header.multiple_threads) == 0, 1) +#endif diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c index 206202809..e19facfb8 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2003, 2005 Free Software Foundation, Inc. +/* Copyright (C) 2003, 2009 Free Software Foundation, Inc.     This file is part of the GNU C Library.     Contributed by Jakub Jelinek <jakub@redhat.com>. 
@@ -18,97 +18,94 @@     Boston, MA 02111-1307, USA.  */  #include <dlfcn.h> -#include <string.h> +#include <stdio.h>  #include <unwind.h> -#include <unistd.h>  #include <pthreadP.h> -#define __libc_dlopen(x)	dlopen(x, (RTLD_LOCAL | RTLD_LAZY)) -#define __libc_dlsym		dlsym - +static void *libgcc_s_handle;  static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);  static _Unwind_Reason_Code (*libgcc_s_personality) -  (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); +  (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *, +   struct _Unwind_Context *);  static _Unwind_Reason_Code (*libgcc_s_forcedunwind)    (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *);  static _Unwind_Word (*libgcc_s_getcfa) (struct _Unwind_Context *); +static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *); +static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);  void +__attribute_noinline__  pthread_cancel_init (void)  {    void *resume, *personality, *forcedunwind, *getcfa;    void *handle; +  void *sjlj_register, *sjlj_unregister; -  if (__builtin_expect (libgcc_s_getcfa != NULL, 1)) -    return; +  if (__builtin_expect (libgcc_s_handle != NULL, 1)) +    { +      /* Force gcc to reload all values.  */ +      asm volatile ("" ::: "memory"); +      return; +    }    handle = __libc_dlopen ("libgcc_s.so.1");    if (handle == NULL -      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL -      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL -      || (forcedunwind = __libc_dlsym (handle, "_Unwind_ForcedUnwind")) +      || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL +      || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL +      || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL +      || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL +      || (forcedunwind = __libc_dlsym (handle, "_Unwind_SjLj_ForcedUnwind"))  	 == NULL        || (getcfa = __libc_dlsym (handle, "_Unwind_GetCFA")) == NULL -#ifdef ARCH_CANCEL_INIT -      || ARCH_CANCEL_INIT (handle) -#endif        ) -    { -# define STR_N_LEN(str) str, strlen (str) -      INTERNAL_SYSCALL_DECL (err); -      INTERNAL_SYSCALL (write, err, 3, STDERR_FILENO, -			STR_N_LEN ("libgcc_s.so.1 must be installed for pthread_cancel to work\n")); -      abort (); -    } +    __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");    libgcc_s_resume = resume;    libgcc_s_personality = personality;    libgcc_s_forcedunwind = forcedunwind; +  libgcc_s_sjlj_register = sjlj_register; +  libgcc_s_sjlj_unregister = sjlj_unregister;    libgcc_s_getcfa = getcfa; +  /* Make sure libgcc_s_getcfa is written last.  Otherwise, +     pthread_cancel_init might return early even when the pointer the +     caller is interested in is not initialized yet.  */ +  atomic_write_barrier (); +  libgcc_s_handle = handle; +} + +void +__libc_freeres_fn_section +__unwind_freeres (void) +{ +  void *handle = libgcc_s_handle; +  if (handle != NULL) +    { +      libgcc_s_handle = NULL; +      __libc_dlclose (handle); +    }  } -/* It's vitally important that _Unwind_Resume not have a stack frame; the -   ARM unwinder relies on register state at entrance.  So we write this in -   assembly.  
*/ - -asm ( -#ifdef __thumb__ -"	.code 32" -#endif -"	.globl	_Unwind_Resume\n" -"	.type	_Unwind_Resume, %function\n" -"_Unwind_Resume:\n" -"	stmfd	sp!, {r4, r5, r6, lr}\n" -"	ldr	r4, 1f\n" -"	ldr	r5, 2f\n" -"3:	add	r4, pc, r4\n" -"	ldr	r3, [r4, r5]\n" -"	mov	r6, r0\n" -"	cmp	r3, #0\n" -"	beq	4f\n" -"5:	mov	r0, r6\n" -"	ldmfd	sp!, {r4, r5, r6, lr}\n" -"	bx	r3\n" -"4:	bl	pthread_cancel_init\n" -"	ldr	r3, [r4, r5]\n" -"	b	5b\n" -"1:	.word	_GLOBAL_OFFSET_TABLE_ - 3b - 8\n" -"2:	.word	libgcc_s_resume(GOTOFF)\n" -"	.size	_Unwind_Resume, .-_Unwind_Resume\n" -#ifdef __thumb__ -"	.code 16" -#endif -); +void +_Unwind_Resume (struct _Unwind_Exception *exc) +{ +  if (__builtin_expect (libgcc_s_resume == NULL, 0)) +    pthread_cancel_init (); + +  libgcc_s_resume (exc); +}  _Unwind_Reason_Code -__gcc_personality_v0 (_Unwind_State state, -		      struct _Unwind_Exception *ue_header, -		      struct _Unwind_Context *context) +__gcc_personality_v0 (int version, _Unwind_Action actions, +		      _Unwind_Exception_Class exception_class, +                      struct _Unwind_Exception *ue_header, +                      struct _Unwind_Context *context)  {    if (__builtin_expect (libgcc_s_personality == NULL, 0))      pthread_cancel_init (); -  return libgcc_s_personality (state, ue_header, context); + +  return libgcc_s_personality (version, actions, exception_class, +			       ue_header, context);  }  _Unwind_Reason_Code @@ -117,6 +114,7 @@ _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,  {    if (__builtin_expect (libgcc_s_forcedunwind == NULL, 0))      pthread_cancel_init (); +    return libgcc_s_forcedunwind (exc, stop, stop_argument);  } @@ -125,5 +123,24 @@ _Unwind_GetCFA (struct _Unwind_Context *context)  {    if (__builtin_expect (libgcc_s_getcfa == NULL, 0))      pthread_cancel_init (); +    return libgcc_s_getcfa (context);  } + +void +_Unwind_SjLj_Register (struct SjLj_Function_Context *fc) +{ +  if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0)) +    pthread_cancel_init (); + +  libgcc_s_sjlj_register (fc); +} + +void +_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc) +{ +  if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0)) +    pthread_cancel_init (); + +  libgcc_s_sjlj_unregister (fc); +} diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c index 99b15746a..8dcfd34b1 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2003, 2005 Free Software Foundation, Inc. +/* Copyright (C) 2003 Free Software Foundation, Inc.     This file is part of the GNU C Library.     Contributed by Jakub Jelinek <jakub@redhat.com>. 
@@ -19,77 +19,69 @@  #include <dlfcn.h>  #include <stdio.h> -#include <stdlib.h>  #include <unwind.h> -#define __libc_dlopen(x)	dlopen(x, (RTLD_LOCAL | RTLD_LAZY)) -#define __libc_dlsym		dlsym -  static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);  static _Unwind_Reason_Code (*libgcc_s_personality) -  (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); - -static void init (void) __attribute_used__; +  (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *, +   struct _Unwind_Context *); +static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *); +static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);  static void  init (void)  {    void *resume, *personality;    void *handle; +  void *sjlj_register, *sjlj_unregister;    handle = __libc_dlopen ("libgcc_s.so.1");    if (handle == NULL -      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL -      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL) { -    fprintf(stderr, "libgcc_s.so.1 must be installed for pthread_cancel to work\n"); -    abort (); -  } +      || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL +      || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL +      || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL +      || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL) +    __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");    libgcc_s_resume = resume;    libgcc_s_personality = personality; +  libgcc_s_sjlj_register = sjlj_register; +  libgcc_s_sjlj_unregister = sjlj_unregister;  } -/* It's vitally important that _Unwind_Resume not have a stack frame; the -   ARM unwinder relies on register state at entrance.  So we write this in -   assembly.  
*/ - -__asm__ ( -#ifdef __thumb__ -"       .code 32\n" -#endif -"	.globl	_Unwind_Resume\n" -"	.type	_Unwind_Resume, %function\n" -"_Unwind_Resume:\n" -"	stmfd	sp!, {r4, r5, r6, lr}\n" -"	ldr	r4, 1f\n" -"	ldr	r5, 2f\n" -"3:	add	r4, pc, r4\n" -"	ldr	r3, [r4, r5]\n" -"	mov	r6, r0\n" -"	cmp	r3, #0\n" -"	beq	4f\n" -"5:	mov	r0, r6\n" -"	ldmfd	sp!, {r4, r5, r6, lr}\n" -"	bx	r3\n" -"4:	bl	init\n" -"	ldr	r3, [r4, r5]\n" -"	b	5b\n" -"1:	.word	_GLOBAL_OFFSET_TABLE_ - 3b - 8\n" -"2:	.word	libgcc_s_resume(GOTOFF)\n" -"	.size	_Unwind_Resume, .-_Unwind_Resume\n" -#ifdef __thumb__ -"       .code 16\n" -#endif - -); +void +_Unwind_Resume (struct _Unwind_Exception *exc) +{ +  if (__builtin_expect (libgcc_s_resume == NULL, 0)) +    init (); +  libgcc_s_resume (exc); +}  _Unwind_Reason_Code -__gcc_personality_v0 (_Unwind_State state, -		      struct _Unwind_Exception *ue_header, -		      struct _Unwind_Context *context) +__gcc_personality_v0 (int version, _Unwind_Action actions, +		      _Unwind_Exception_Class exception_class, +                      struct _Unwind_Exception *ue_header, +                      struct _Unwind_Context *context)  {    if (__builtin_expect (libgcc_s_personality == NULL, 0))      init (); -  return libgcc_s_personality (state, ue_header, context); +  return libgcc_s_personality (version, actions, exception_class, +			       ue_header, context); +} + +void +_Unwind_SjLj_Register (struct SjLj_Function_Context *fc) +{ +  if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0)) +    init (); +  libgcc_s_sjlj_register (fc); +} + +void +_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc) +{ +  if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0)) +    init (); +  libgcc_s_sjlj_unregister (fc);  } diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h index d625fb288..eeb9cf8b6 100644 --- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h +++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h @@ -1,5 +1,5 @@  /* Header file for the ARM EABI unwinder -   Copyright (C) 2003, 2004, 2005  Free Software Foundation, Inc. +   Copyright (C) 2003, 2004, 2005, 2009  Free Software Foundation, Inc.     Contributed by Paul Brook     This file is free software; you can redistribute it and/or modify it @@ -267,6 +267,11 @@ extern "C" {  #define _Unwind_SetIP(context, val) \    _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1)) +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) +     (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); +  #ifdef __cplusplus  }   /* extern "C" */  #endif | 
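The lowlevellock.h hunk above introduces the private-futex plumbing: `LLL_PRIVATE` is 0, `LLL_SHARED` is `FUTEX_PRIVATE_FLAG`, and `__lll_private_flag` XORs the caller's value so that private waiters are the ones that actually pass `FUTEX_PRIVATE_FLAG` to the kernel (the "definition seems to be backwards" comment in the patch). A minimal self-contained sketch, using only the `__ASSUME_PRIVATE_FUTEX` libpthread variant of the macro copied from the patch (the `main` and asserts are illustrative):

```c
/* Sketch: behaviour of __lll_private_flag when __ASSUME_PRIVATE_FUTEX
   is defined and the code is built into libpthread.  */
#include <assert.h>

#define FUTEX_WAIT          0
#define FUTEX_PRIVATE_FLAG  128
#define LLL_PRIVATE         0
#define LLL_SHARED          FUTEX_PRIVATE_FLAG

#define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

int
main (void)
{
  /* A private waiter ends up with the private flag set...  */
  assert (__lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
          == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  /* ...while a shared waiter passes the plain futex operation.  */
  assert (__lll_private_flag (FUTEX_WAIT, LLL_SHARED) == FUTEX_WAIT);
  return 0;
}
```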
