Diffstat (limited to 'libpthread/nptl/sysdeps')
-rw-r--r--  libpthread/nptl/sysdeps/arm/tls.h | 1
-rw-r--r--  libpthread/nptl/sysdeps/generic/lowlevellock.h | 21
-rw-r--r--  libpthread/nptl/sysdeps/i386/pthread_spin_lock.c | 3
-rw-r--r--  libpthread/nptl/sysdeps/i386/tcb-offsets.sym | 4
-rw-r--r--  libpthread/nptl/sysdeps/i386/tls.h | 77
-rw-r--r--  libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym | 4
-rw-r--r--  libpthread/nptl/sysdeps/powerpc/tls.h | 42
-rw-r--r--  libpthread/nptl/sysdeps/pthread/Makefile.in | 9
-rw-r--r--  libpthread/nptl/sysdeps/pthread/allocalim.h | 5
-rw-r--r--  libpthread/nptl/sysdeps/pthread/bits/libc-lock.h | 54
-rw-r--r--  libpthread/nptl/sysdeps/pthread/bits/sigthread.h | 8
-rw-r--r--  libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h | 14
-rw-r--r--  libpthread/nptl/sysdeps/pthread/createthread.c | 18
-rw-r--r--  libpthread/nptl/sysdeps/pthread/librt-cancellation.c | 91
-rw-r--r--  libpthread/nptl/sysdeps/pthread/list.h | 15
-rw-r--r--  libpthread/nptl/sysdeps/pthread/malloc-machine.h | 25
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pt-initfini.c | 4
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pt-longjmp.c | 2
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread-functions.h | 20
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread.h | 374
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c | 17
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c | 28
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c | 21
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c | 29
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c | 51
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_once.c | 8
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c | 18
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c | 14
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c | 12
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c | 16
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c | 16
-rw-r--r--  libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c | 3
-rw-r--r--  libpthread/nptl/sysdeps/pthread/setxid.h | 7
-rw-r--r--  libpthread/nptl/sysdeps/pthread/sigaction.c | 13
-rw-r--r--  libpthread/nptl/sysdeps/pthread/sigfillset.c | 2
-rw-r--r--  libpthread/nptl/sysdeps/pthread/tpp.c | 172
-rw-r--r--  libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c | 80
-rw-r--r--  libpthread/nptl/sysdeps/pthread/unwind-resume.c | 10
-rw-r--r--  libpthread/nptl/sysdeps/sh/tcb-offsets.sym | 4
-rw-r--r--  libpthread/nptl/sysdeps/sh/tls.h | 54
-rw-r--r--  libpthread/nptl/sysdeps/sparc/tcb-offsets.sym | 1
-rw-r--r--  libpthread/nptl/sysdeps/sparc/tls.h | 52
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in | 43
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h | 8
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h | 255
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c | 8
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h | 14
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h | 114
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h | 16
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h | 5
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c | 55
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h | 407
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c | 6
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h | 60
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c | 133
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c | 92
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h | 94
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/fork.c | 23
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/fork.h | 10
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S | 349
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S | 233
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S | 71
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S | 152
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S | 155
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S | 384
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S | 383
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S | 74
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S | 96
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S | 92
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S | 59
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S | 69
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S | 76
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S | 266
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S | 243
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S | 20
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S | 20
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h | 703
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h | 51
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S | 34
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S | 32
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h | 38
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h | 18
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h | 23
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c | 44
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c | 64
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym | 6
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h | 26
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h | 5
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h | 269
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c | 6
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h | 59
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c | 5
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions | 5
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h | 15
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h | 251
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h | 37
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h | 16
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c | 8
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c | 29
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c | 19
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym | 8
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c | 56
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c | 18
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c | 10
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c | 83
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/raise.c | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c | 52
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c | 30
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c | 62
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c | 54
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch | 5
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h | 19
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S | 421
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h | 311
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S | 264
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h | 83
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c | 21
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S | 49
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S | 99
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S | 119
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S | 184
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S | 220
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S | 68
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S | 62
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S | 70
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S | 67
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S | 48
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S | 58
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S | 50
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S | 266
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S | 9
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S | 265
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h | 128
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/smp.h | 30
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions | 6
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h | 13
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h | 34
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c | 37
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h | 166
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h | 1
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c | 45
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c | 55
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c | 8
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c | 57
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c | 94
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c | 55
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c | 148
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c | 54
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c | 127
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h | 91
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym | 12
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c | 21
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c | 26
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c | 59
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c | 26
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions | 7
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h | 3
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S | 334
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h | 718
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S | 2
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S | 40
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S | 81
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S | 93
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S | 819
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S | 470
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S | 152
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S | 47
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S | 121
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S | 120
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S | 43
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S | 47
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S | 57
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S | 327
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S | 11
-rw-r--r--  libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S | 187
-rw-r--r--  libpthread/nptl/sysdeps/x86_64/Makefile | 27
-rw-r--r--  libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym | 16
-rw-r--r--  libpthread/nptl/sysdeps/x86_64/tls.h | 3
219 files changed, 10971 insertions, 4697 deletions
diff --git a/libpthread/nptl/sysdeps/arm/tls.h b/libpthread/nptl/sysdeps/arm/tls.h
index b8efd59b3..29200a1d2 100644
--- a/libpthread/nptl/sysdeps/arm/tls.h
+++ b/libpthread/nptl/sysdeps/arm/tls.h
@@ -21,7 +21,6 @@
#define _TLS_H 1
#ifndef __ASSEMBLER__
-#include <dl-sysdep.h>
# include <stdbool.h>
# include <stddef.h>
diff --git a/libpthread/nptl/sysdeps/generic/lowlevellock.h b/libpthread/nptl/sysdeps/generic/lowlevellock.h
index 7f95daada..0600e1794 100644
--- a/libpthread/nptl/sysdeps/generic/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/generic/lowlevellock.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Low level locking macros used in NPTL implementation. Stub version.
+ Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,16 +21,6 @@
#include <atomic.h>
-/* Implement generic mutex. Basic futex syscall support is required:
-
- lll_futex_wait(futex, value) - call sys_futex with FUTEX_WAIT
- and third parameter VALUE
-
- lll_futex_wake(futex, value) - call sys_futex with FUTEX_WAKE
- and third parameter VALUE
-*/
-
-
/* Mutex lock counter:
bit 31 clear means unlocked;
bit 31 set means locked.
@@ -65,7 +56,9 @@ __generic_mutex_lock (int *mutex)
if (v >= 0)
continue;
- lll_futex_wait (mutex, v);
+ lll_futex_wait (mutex, v,
+ // XYZ check mutex flag
+ LLL_SHARED);
}
}
@@ -81,7 +74,9 @@ __generic_mutex_unlock (int *mutex)
/* There are other threads waiting for this mutex, wake one of them
up. */
- lll_futex_wake (mutex, 1);
+ lll_futex_wake (mutex, 1,
+ // XYZ check mutex flag
+ LLL_SHARED);
}
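
Aside, not part of the patch: the hunk above gives the generic lll_futex_wait/lll_futex_wake wrappers an explicit shared/private argument. A minimal standalone sketch of what such wrappers boil down to on Linux; the real per-architecture macros differ, and every *_sketch name and constant below is made up for illustration.

    /* Sketch only (not the NPTL macros): futex wait/wake helpers that take a
       private/shared flag, in the spirit of the new third argument.  */
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    #define LLL_PRIVATE_SKETCH 0   /* futex used by one process only */
    #define LLL_SHARED_SKETCH  1   /* futex may live in shared memory */

    static long
    futex_wait_sketch (int *futex, int val, int private)
    {
      /* FUTEX_WAIT sleeps while *futex == val; FUTEX_PRIVATE_FLAG lets the
         kernel use the cheaper per-process futex hashing.  */
      int op = FUTEX_WAIT | (private == LLL_PRIVATE_SKETCH ? FUTEX_PRIVATE_FLAG : 0);
      return syscall (SYS_futex, futex, op, val, NULL, NULL, 0);
    }

    static long
    futex_wake_sketch (int *futex, int nwake, int private)
    {
      int op = FUTEX_WAKE | (private == LLL_PRIVATE_SKETCH ? FUTEX_PRIVATE_FLAG : 0);
      return syscall (SYS_futex, futex, op, nwake, NULL, NULL, 0);
    }

The __ASSUME_PRIVATE_FUTEX conditionals and the private_futex TCB slot that appear in the tls.h hunks below exist because older kernels lack FUTEX_PRIVATE_FLAG, so support has to be detected at runtime when it cannot be assumed at build time.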
diff --git a/libpthread/nptl/sysdeps/i386/pthread_spin_lock.c b/libpthread/nptl/sysdeps/i386/pthread_spin_lock.c
index a6b1cf403..34cd525dc 100644
--- a/libpthread/nptl/sysdeps/i386/pthread_spin_lock.c
+++ b/libpthread/nptl/sysdeps/i386/pthread_spin_lock.c
@@ -29,7 +29,8 @@
int
-pthread_spin_lock (pthread_spinlock_t *lock)
+pthread_spin_lock (
+ pthread_spinlock_t *lock)
{
__asm__ ("\n"
"1:\t" LOCK_PREFIX "decl %0\n\t"
diff --git a/libpthread/nptl/sysdeps/i386/tcb-offsets.sym b/libpthread/nptl/sysdeps/i386/tcb-offsets.sym
index 4e0444ba3..69f9deb36 100644
--- a/libpthread/nptl/sysdeps/i386/tcb-offsets.sym
+++ b/libpthread/nptl/sysdeps/i386/tcb-offsets.sym
@@ -11,3 +11,7 @@ SYSINFO_OFFSET offsetof (tcbhead_t, sysinfo)
CLEANUP offsetof (struct pthread, cleanup)
CLEANUP_PREV offsetof (struct _pthread_cleanup_buffer, __prev)
MUTEX_FUTEX offsetof (pthread_mutex_t, __data.__lock)
+POINTER_GUARD offsetof (tcbhead_t, pointer_guard)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX offsetof (tcbhead_t, private_futex)
+#endif
diff --git a/libpthread/nptl/sysdeps/i386/tls.h b/libpthread/nptl/sysdeps/i386/tls.h
index 52bde9eec..5f27d8fec 100644
--- a/libpthread/nptl/sysdeps/i386/tls.h
+++ b/libpthread/nptl/sysdeps/i386/tls.h
@@ -1,5 +1,5 @@
/* Definition for thread-local data handling. nptl/i386 version.
- Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,8 @@
# include <stdint.h>
# include <stdlib.h>
# include <list.h>
+# include <sysdep.h>
+# include <bits/kernel-features.h>
/* Type for the dtv. */
@@ -49,6 +51,15 @@ typedef struct
int multiple_threads;
uintptr_t sysinfo;
uintptr_t stack_guard;
+ uintptr_t pointer_guard;
+ int gscope_flag;
+#ifndef __ASSUME_PRIVATE_FUTEX
+ int private_futex;
+#else
+ int __unused1;
+#endif
+ /* Reservation of some values for the TM ABI. */
+ void *__private_tm[5];
} tcbhead_t;
# define TLS_MULTIPLE_THREADS_IN_TCB 1
@@ -64,7 +75,8 @@ typedef struct
#define HAVE_TLS_MODEL_ATTRIBUTE 1
/* Signal that TLS support is available. */
-#define USE_TLS 1
+#define USE_TLS 1
+
/* Alignment requirement for the stack. For IA-32 this is governed by
the SSE memory functions. */
@@ -99,6 +111,9 @@ union user_desc_init
};
+/* Get the thread descriptor definition. */
+# include <descr.h>
+
/* This is the size of the initial TCB. Can't be just sizeof (tcbhead_t),
because NPTL getpid, __libc_alloca_cutoff etc. need (almost) the whole
struct pthread even when not linked with -lpthread. */
@@ -113,9 +128,6 @@ union user_desc_init
/* Alignment requirements for the TCB. */
# define TLS_TCB_ALIGN __alignof__ (struct pthread)
-/* Get the thread descriptor definition. */
-#include <descr.h>
-
/* The TCB can have any size and the memory following the address the
thread pointer points to is unspecified. Allocate the TCB there. */
# define TLS_TCB_AT_TP 1
@@ -220,7 +232,7 @@ union user_desc_init
_segdescr.vals[3] = 0x51; \
\
/* Install the TLS. */ \
- __asm__ volatile (TLS_LOAD_EBX \
+ __asm__ volatile (TLS_LOAD_EBX \
"int $0x80\n\t" \
TLS_LOAD_EBX \
: "=a" (_result), "=m" (_segdescr.desc.entry_number) \
@@ -256,7 +268,7 @@ union user_desc_init
do not get optimized away. */
# define THREAD_SELF \
({ struct pthread *__self; \
- __asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
+ __asm__ ("movl %%gs:%c1,%0" : "=r" (__self) \
: "i" (offsetof (struct pthread, header.self))); \
__self;})
@@ -270,11 +282,11 @@ union user_desc_init
# define THREAD_GETMEM(descr, member) \
({ __typeof (descr->member) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%gs:%P2,%b0" \
+ __asm__ volatile ("movb %%gs:%P2,%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member))); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%gs:%P1,%0" \
+ __asm__ volatile ("movl %%gs:%P1,%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member))); \
else \
@@ -297,12 +309,12 @@ union user_desc_init
# define THREAD_GETMEM_NC(descr, member, idx) \
({ __typeof (descr->member[0]) __value; \
if (sizeof (__value) == 1) \
- __asm__ volatile ("movb %%gs:%P2(%3),%b0" \
+ __asm__ volatile ("movb %%gs:%P2(%3),%b0" \
: "=q" (__value) \
: "0" (0), "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
else if (sizeof (__value) == 4) \
- __asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
+ __asm__ volatile ("movl %%gs:%P1(,%2,4),%0" \
: "=r" (__value) \
: "i" (offsetof (struct pthread, member[0])), \
"r" (idx)); \
@@ -350,7 +362,7 @@ union user_desc_init
/* Set member of the thread descriptor directly. */
# define THREAD_SETMEM_NC(descr, member, idx, value) \
({ if (sizeof (descr->member[0]) == 1) \
- __asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
+ __asm__ volatile ("movb %b0,%%gs:%P1(%2)" : \
: "iq" (value), \
"i" (offsetof (struct pthread, member)), \
"r" (idx)); \
@@ -366,7 +378,7 @@ union user_desc_init
4 or 8. */ \
abort (); \
\
- __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
+ __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t" \
"movl %%edx,%%gs:4+%P1(,%2,8)" : \
: "A" (value), \
"i" (offsetof (struct pthread, member)), \
@@ -389,6 +401,17 @@ union user_desc_init
__ret; })
+/* Atomic logical and. */
+#define THREAD_ATOMIC_AND(descr, member, val) \
+ (void) ({ if (sizeof ((descr)->member) == 4) \
+ __asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0" \
+ :: "i" (offsetof (struct pthread, member)), \
+ "ir" (val)); \
+ else \
+ /* Not necessary for other sizes in the moment. */ \
+ abort (); })
+
+
/* Atomic set bit. */
#define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
(void) ({ if (sizeof ((descr)->member) == 4) \
@@ -424,6 +447,34 @@ union user_desc_init
= THREAD_GETMEM (THREAD_SELF, header.stack_guard))
+/* Set the pointer guard field in the TCB head. */
+#define THREAD_SET_POINTER_GUARD(value) \
+ THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
+#define THREAD_COPY_POINTER_GUARD(descr) \
+ ((descr)->header.pointer_guard \
+ = THREAD_GETMEM (THREAD_SELF, header.pointer_guard))
+
+
+/* Get and set the global scope generation counter in the TCB head. */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED 1
+#define THREAD_GSCOPE_FLAG_WAIT 2
+#define THREAD_GSCOPE_RESET_FLAG() \
+ do \
+ { int __res; \
+ __asm__ volatile ("xchgl %0, %%gs:%P1" \
+ : "=r" (__res) \
+ : "i" (offsetof (struct pthread, header.gscope_flag)), \
+ "0" (THREAD_GSCOPE_FLAG_UNUSED)); \
+ if (__res == THREAD_GSCOPE_FLAG_WAIT) \
+ lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+ THREAD_SETMEM (THREAD_SELF, header.gscope_flag, THREAD_GSCOPE_FLAG_USED)
+#define THREAD_GSCOPE_WAIT() \
+ GL(dl_wait_lookup_done) ()
+
#endif /* __ASSEMBLER__ */
#endif /* tls.h */
diff --git a/libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym b/libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym
index 3962edbd5..8ac133dfd 100644
--- a/libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym
+++ b/libpthread/nptl/sysdeps/powerpc/tcb-offsets.sym
@@ -14,3 +14,7 @@ MULTIPLE_THREADS_OFFSET thread_offsetof (header.multiple_threads)
#endif
PID thread_offsetof (pid)
TID thread_offsetof (tid)
+POINTER_GUARD (offsetof (tcbhead_t, pointer_guard) - TLS_TCB_OFFSET - sizeof (tcbhead_t))
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX_OFFSET thread_offsetof (header.private_futex)
+#endif
diff --git a/libpthread/nptl/sysdeps/powerpc/tls.h b/libpthread/nptl/sysdeps/powerpc/tls.h
index 1157116a3..ce5559eef 100644
--- a/libpthread/nptl/sysdeps/powerpc/tls.h
+++ b/libpthread/nptl/sysdeps/powerpc/tls.h
@@ -1,5 +1,5 @@
/* Definition for thread-local data handling. NPTL/PowerPC version.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -47,7 +47,7 @@ typedef union dtv
#endif
/* Signal that TLS support is available. */
-# define USE_TLS 1
+# define USE_TLS 1
#ifndef __ASSEMBLER__
@@ -64,9 +64,11 @@ typedef union dtv
# include <nptl/descr.h>
/* The stack_guard is accessed directly by GCC -fstack-protector code,
- so it is a part of public ABI. The dtv field is private. */
+ so it is a part of public ABI. The dtv and pointer_guard fields
+ are private. */
typedef struct
{
+ uintptr_t pointer_guard;
uintptr_t stack_guard;
dtv_t *dtv;
} tcbhead_t;
@@ -164,10 +166,44 @@ register void *__thread_register __asm__ ("r13");
= ((tcbhead_t *) ((char *) __thread_register \
- TLS_TCB_OFFSET))[-1].stack_guard)
+/* Set the stack guard field in TCB head. */
+# define THREAD_GET_POINTER_GUARD() \
+ (((tcbhead_t *) ((char *) __thread_register \
+ - TLS_TCB_OFFSET))[-1].pointer_guard)
+# define THREAD_SET_POINTER_GUARD(value) \
+ (THREAD_GET_POINTER_GUARD () = (value))
+# define THREAD_COPY_POINTER_GUARD(descr) \
+ (((tcbhead_t *) ((char *) (descr) \
+ + TLS_PRE_TCB_SIZE))[-1].pointer_guard \
+ = THREAD_GET_POINTER_GUARD())
+
/* l_tls_offset == 0 is perfectly valid on PPC, so we have to use some
different value to mean unset l_tls_offset. */
# define NO_TLS_OFFSET -1
+/* Get and set the global scope generation counter in struct pthread. */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED 1
+#define THREAD_GSCOPE_FLAG_WAIT 2
+#define THREAD_GSCOPE_RESET_FLAG() \
+ do \
+ { int __res \
+ = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ THREAD_GSCOPE_FLAG_UNUSED); \
+ if (__res == THREAD_GSCOPE_FLAG_WAIT) \
+ lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+ do \
+ { \
+ THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
+ atomic_write_barrier (); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_WAIT() \
+ GL(dl_wait_lookup_done) ()
+
#endif /* __ASSEMBLER__ */
#endif /* tls.h */
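
Aside, not part of the patch: the THREAD_GSCOPE_* macros added in the two tls.h files above implement a handshake that lets dl_close wait until every thread has left a symbol lookup in the search scopes being removed. A standalone sketch of the protocol using C11 atomics and a raw futex call instead of the TCB accessors; all *_sketch names are illustrative, and the waiter side (which lives in the dynamic linker) is paraphrased, not copied.

    #include <stdatomic.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    enum { GSCOPE_UNUSED = 0, GSCOPE_USED = 1, GSCOPE_WAIT = 2 };

    /* In the real code this lives in each thread's TCB / struct pthread.  */
    static _Atomic int gscope_flag_sketch;

    static void
    gscope_futex_sketch (int op, int val)
    {
      syscall (SYS_futex, (int *) &gscope_flag_sketch, op, val, NULL, NULL, 0);
    }

    /* Lookup side: THREAD_GSCOPE_SET_FLAG / THREAD_GSCOPE_RESET_FLAG.  */
    static void
    lookup_begin_sketch (void)
    {
      atomic_store (&gscope_flag_sketch, GSCOPE_USED);
    }

    static void
    lookup_end_sketch (void)
    {
      /* Release the flag; if dl_close queued itself, wake it up.  */
      if (atomic_exchange (&gscope_flag_sketch, GSCOPE_UNUSED) == GSCOPE_WAIT)
        gscope_futex_sketch (FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    }

    /* dl_close side (THREAD_GSCOPE_WAIT), per thread: if the thread is inside
       a lookup, flip its flag to WAIT and sleep until it drops to UNUSED.  */
    static void
    wait_for_lookup_done_sketch (void)
    {
      int expected = GSCOPE_USED;
      if (atomic_compare_exchange_strong (&gscope_flag_sketch, &expected,
                                          GSCOPE_WAIT))
        while (atomic_load (&gscope_flag_sketch) == GSCOPE_WAIT)
          gscope_futex_sketch (FUTEX_WAIT | FUTEX_PRIVATE_FLAG, GSCOPE_WAIT);
    }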
diff --git a/libpthread/nptl/sysdeps/pthread/Makefile.in b/libpthread/nptl/sysdeps/pthread/Makefile.in
index 0d1097929..9497789ab 100644
--- a/libpthread/nptl/sysdeps/pthread/Makefile.in
+++ b/libpthread/nptl/sysdeps/pthread/Makefile.in
@@ -18,7 +18,7 @@ libpthread_CSRC = pthread_barrier_wait.c pthread_cond_broadcast.c \
pthread_rwlock_wrlock.c pthread_sigmask.c \
pthread_spin_destroy.c pthread_spin_init.c \
pthread_spin_unlock.c pt-sigfillset.c \
- pt-longjmp.c
+ pt-longjmp.c tpp.c
ifeq ($(TARGET_ARCH),i386)
@@ -43,6 +43,13 @@ SH_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
libpthread_CSRC := $(filter-out $(SH_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
endif
+ifeq ($(TARGET_ARCH),sparc)
+SPARC_PTHREAD_EXCLUDE_LIST = pthread_barrier_init.c pthread_barrier_wait.c \
+ pthread_barrier_destroy.c
+
+libpthread_CSRC := $(filter-out $(SPARC_PTHREAD_EXCLUDE_LIST),$(libpthread_CSRC))
+endif
+
ifeq ($(TARGET_ARCH),x86_64)
X64_PTHREAD_EXCLUDE_LIST = pthread_spin_unlock.c pthread_spin_init.c \
pthread_barrier_wait.c pthread_cond_broadcast.c \
diff --git a/libpthread/nptl/sysdeps/pthread/allocalim.h b/libpthread/nptl/sysdeps/pthread/allocalim.h
index 35224ec74..f13c3a330 100644
--- a/libpthread/nptl/sysdeps/pthread/allocalim.h
+++ b/libpthread/nptl/sysdeps/pthread/allocalim.h
@@ -1,5 +1,5 @@
/* Determine whether block of given size can be allocated on the stack or not.
- Copyright (C) 2002 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,7 +21,8 @@
#include <limits.h>
-extern inline int
+extern int
+__always_inline
__libc_use_alloca (size_t size)
{
return (__builtin_expect (size <= PTHREAD_STACK_MIN / 4, 1)
diff --git a/libpthread/nptl/sysdeps/pthread/bits/libc-lock.h b/libpthread/nptl/sysdeps/pthread/bits/libc-lock.h
index c59e3a0cc..70fe6762f 100644
--- a/libpthread/nptl/sysdeps/pthread/bits/libc-lock.h
+++ b/libpthread/nptl/sysdeps/pthread/bits/libc-lock.h
@@ -1,5 +1,5 @@
/* libc-internal interface for mutex locks. NPTL version.
- Copyright (C) 1996-2001, 2002, 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 1996-2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -150,13 +150,17 @@ typedef pthread_key_t __libc_key_t;
/* Call thread functions through the function pointer table. */
#if defined SHARED && !defined NOT_IN_libc
-# define PTF(NAME) __libc_pthread_functions.ptr_##NAME
+# define PTFAVAIL(NAME) __libc_pthread_functions_init
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
- (PTF(FUNC) != NULL ? PTF(FUNC) ARGS : ELSE)
+ (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+ PTHFCT_CALL (ptr_##FUNC, ARGS)
#else
-# define PTF(NAME) NAME
+# define PTFAVAIL(NAME) (NAME != NULL)
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
__libc_maybe_call (FUNC, ARGS, ELSE)
+# define __libc_ptf_call_always(FUNC, ARGS) \
+ FUNC ARGS
#endif
@@ -168,8 +172,15 @@ typedef pthread_key_t __libc_key_t;
# define __libc_lock_init(NAME) \
__libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
-#define __libc_rwlock_init(NAME) \
+#if defined SHARED && !defined NOT_IN_libc
+/* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER, 0) is
+ inefficient. */
+# define __libc_rwlock_init(NAME) \
+ (__builtin_memset (&(NAME), '\0', sizeof (NAME)), 0)
+#else
+# define __libc_rwlock_init(NAME) \
__libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)
+#endif
/* Same as last but this time we initialize a recursive mutex. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@@ -210,8 +221,12 @@ typedef pthread_key_t __libc_key_t;
# define __libc_lock_fini(NAME) \
__libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
-#define __libc_rwlock_fini(NAME) \
+#if defined SHARED && !defined NOT_IN_libc
+# define __libc_rwlock_fini(NAME) ((void) 0)
+#else
+# define __libc_rwlock_fini(NAME) \
__libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)
+#endif
/* Finalize recursive named lock. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
@@ -224,7 +239,7 @@ typedef pthread_key_t __libc_key_t;
/* Lock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_lock(NAME) \
- ({ lll_lock (NAME); 0; })
+ ({ lll_lock (NAME, LLL_PRIVATE); 0; })
#else
# define __libc_lock_lock(NAME) \
__libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
@@ -241,7 +256,7 @@ typedef pthread_key_t __libc_key_t;
void *self = THREAD_SELF; \
if ((NAME).owner != self) \
{ \
- lll_lock ((NAME).lock); \
+ lll_lock ((NAME).lock, LLL_PRIVATE); \
(NAME).owner = self; \
} \
++(NAME).cnt; \
@@ -295,7 +310,7 @@ typedef pthread_key_t __libc_key_t;
/* Unlock the named lock variable. */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
- lll_unlock (NAME)
+ lll_unlock (NAME, LLL_PRIVATE)
#else
# define __libc_lock_unlock(NAME) \
__libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
@@ -311,7 +326,7 @@ typedef pthread_key_t __libc_key_t;
if (--(NAME).cnt == 0) \
{ \
(NAME).owner = NULL; \
- lll_unlock ((NAME).lock); \
+ lll_unlock ((NAME).lock, LLL_PRIVATE); \
} \
} while (0)
#else
@@ -353,8 +368,9 @@ typedef pthread_key_t __libc_key_t;
/* Call handler iff the first call. */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
do { \
- if (PTF(__pthread_once) != NULL) \
- PTF(__pthread_once) (&(ONCE_CONTROL), INIT_FUNCTION); \
+ if (PTFAVAIL (__pthread_once)) \
+ __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL), \
+ INIT_FUNCTION)); \
else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
INIT_FUNCTION (); \
(ONCE_CONTROL) |= 2; \
@@ -380,9 +396,10 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
{ struct _pthread_cleanup_buffer _buffer; \
int _avail; \
if (DOIT) { \
- _avail = PTF(_pthread_cleanup_push_defer) != NULL; \
+ _avail = PTFAVAIL (_pthread_cleanup_push_defer); \
if (_avail) { \
- PTF(_pthread_cleanup_push_defer) (&_buffer, FCT, ARG); \
+ __libc_ptf_call_always (_pthread_cleanup_push_defer, (&_buffer, FCT, \
+ ARG)); \
} else { \
_buffer.__routine = (FCT); \
_buffer.__arg = (ARG); \
@@ -394,7 +411,7 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
/* End critical region with cleanup. */
#define __libc_cleanup_region_end(DOIT) \
if (_avail) { \
- PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
+ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
} else if (DOIT) \
_buffer.__routine (_buffer.__arg); \
}
@@ -402,16 +419,13 @@ extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer
/* Sometimes we have to exit the block in the middle. */
#define __libc_cleanup_end(DOIT) \
if (_avail) { \
- PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
+ __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
} else if (DOIT) \
_buffer.__routine (_buffer.__arg)
/* Normal cleanup handling, based on C cleanup attribute. */
__extern_inline void
-__libc_cleanup_routine (struct __pthread_cleanup_frame *f);
-
-__extern_inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
if (f->__do_it)
@@ -531,6 +545,7 @@ weak_extern (__pthread_key_create)
weak_extern (__pthread_setspecific)
weak_extern (__pthread_getspecific)
weak_extern (__pthread_once)
+weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
#ifdef SHARED
weak_extern (_pthread_cleanup_push_defer)
@@ -556,6 +571,7 @@ weak_extern (pthread_setcancelstate)
# pragma weak __pthread_setspecific
# pragma weak __pthread_getspecific
# pragma weak __pthread_once
+# pragma weak __pthread_initialize
# pragma weak __pthread_atfork
# pragma weak _pthread_cleanup_push_defer
# pragma weak _pthread_cleanup_pop_restore
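
Aside, not part of the patch: the libc-lock rework above routes libc's internal locking through the pthread function-pointer table only when libpthread has registered it (__libc_pthread_functions_init), and turns rwlock init/fini into a memset/no-op in that configuration. A compressed standalone sketch of the availability check that __libc_ptf_call expresses; structure and names are illustrative, not the real definitions.

    #include <pthread.h>

    /* Filled in by the threading library when it is loaded.  */
    struct ptf_table_sketch
    {
      int (*ptr_mutex_lock) (pthread_mutex_t *);
      int (*ptr_mutex_unlock) (pthread_mutex_t *);
    };
    static struct ptf_table_sketch ptf_sketch;
    static int ptf_init_sketch;          /* stays 0 until libpthread registers */

    /* Single-threaded programs pay nothing: ELSE is returned, no call made.  */
    #define libc_ptf_call_sketch(FUNC, ARGS, ELSE) \
      (ptf_init_sketch ? ptf_sketch.ptr_##FUNC ARGS : (ELSE))

    static pthread_mutex_t some_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

    static void
    libc_internal_lock_sketch (void)
    {
      /* Expands to a real lock only if threads can exist.  */
      (void) libc_ptf_call_sketch (mutex_lock, (&some_lock_sketch), 0);
    }

In the shared-libc case this replaces the old per-function NULL checks (PTF(...) != NULL) with one flag test plus a call through a mangled pointer, which the pthread-functions.h hunk further below spells out as PTHFCT_CALL.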
diff --git a/libpthread/nptl/sysdeps/pthread/bits/sigthread.h b/libpthread/nptl/sysdeps/pthread/bits/sigthread.h
index 960bde18a..9a524e57d 100644
--- a/libpthread/nptl/sysdeps/pthread/bits/sigthread.h
+++ b/libpthread/nptl/sysdeps/pthread/bits/sigthread.h
@@ -1,5 +1,5 @@
/* Signal handling function for threaded programs.
- Copyright (C) 1998, 1999, 2000, 2002 Free Software Foundation, Inc.
+ Copyright (C) 1998, 1999, 2000, 2002, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -35,4 +35,10 @@ extern int pthread_sigmask (int __how,
/* Send signal SIGNO to the given thread. */
extern int pthread_kill (pthread_t __threadid, int __signo) __THROW;
+#ifdef __USE_GNU
+/* Queue signal and data to a thread. */
+extern int pthread_sigqueue (pthread_t __threadid, int __signo,
+ const union sigval __value) __THROW;
+#endif
+
#endif /* bits/sigthread.h */
diff --git a/libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h b/libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h
index cd64bc37e..b8efdd8d5 100644
--- a/libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h
+++ b/libpthread/nptl/sysdeps/pthread/bits/stdio-lock.h
@@ -1,5 +1,5 @@
/* Thread package specific definitions of stream lock type. NPTL version.
- Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 2000, 2001, 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -42,7 +42,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
void *__self = THREAD_SELF; \
if ((_name).owner != __self) \
{ \
- lll_lock ((_name).lock); \
+ lll_lock ((_name).lock, LLL_PRIVATE); \
(_name).owner = __self; \
} \
++(_name).cnt; \
@@ -72,7 +72,7 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
if (--(_name).cnt == 0) \
{ \
(_name).owner = NULL; \
- lll_unlock ((_name).lock); \
+ lll_unlock ((_name).lock, LLL_PRIVATE); \
} \
} while (0)
@@ -94,9 +94,15 @@ typedef struct { int lock; int cnt; void *owner; } _IO_lock_t;
__attribute__((cleanup (_IO_acquire_lock_fct))) \
= (_fp); \
_IO_flockfile (_IO_acquire_lock_file);
-
+# define _IO_acquire_lock_clear_flags2(_fp) \
+ do { \
+ _IO_FILE *_IO_acquire_lock_file \
+ __attribute__((cleanup (_IO_acquire_lock_clear_flags2_fct))) \
+ = (_fp); \
+ _IO_flockfile (_IO_acquire_lock_file);
# else
# define _IO_acquire_lock(_fp) _IO_acquire_lock_needs_exceptions_enabled
+# define _IO_acquire_lock_clear_flags2(_fp) _IO_acquire_lock (_fp)
# endif
# define _IO_release_lock(_fp) ; } while (0)
diff --git a/libpthread/nptl/sysdeps/pthread/createthread.c b/libpthread/nptl/sysdeps/pthread/createthread.c
index 88ffe09d3..a676e277f 100644
--- a/libpthread/nptl/sysdeps/pthread/createthread.c
+++ b/libpthread/nptl/sysdeps/pthread/createthread.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -56,11 +56,11 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
PREPARE_CREATE;
#endif
- if (stopped)
- /* We Make sure the thread does not run far by forcing it to get a
+ if (__builtin_expect (stopped != 0, 0))
+ /* We make sure the thread does not run far by forcing it to get a
lock. We lock it here too so that the new thread cannot continue
until we tell it to. */
- lll_lock (pd->lock);
+ lll_lock (pd->lock, LLL_PRIVATE);
/* One more thread. We cannot have the thread do this itself, since it
might exist but not have been scheduled yet by the time we've returned
@@ -84,7 +84,8 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
if (IS_DETACHED (pd))
__deallocate_stack (pd);
- return errno;
+ /* We have to translate error codes. */
+ return errno == ENOMEM ? EAGAIN : errno;
}
/* Now we have the possibility to set scheduling parameters etc. */
@@ -97,7 +98,7 @@ do_clone (struct pthread *pd, const struct pthread_attr *attr,
if (attr->cpuset != NULL)
{
res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
- sizeof (cpu_set_t), attr->cpuset);
+ attr->cpusetsize, attr->cpuset);
if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
{
@@ -223,7 +224,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
__nptl_create_event ();
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
}
return res;
@@ -242,6 +243,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
|| (attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))
stopped = true;
pd->stopped_start = stopped;
+ pd->parent_cancelhandling = THREAD_GETMEM (THREAD_SELF, cancelhandling);
/* Actually create the thread. */
int res = do_clone (pd, attr, clone_flags, start_thread,
@@ -249,7 +251,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr,
if (res == 0 && stopped)
/* And finally restart the new thread. */
- lll_unlock (pd->lock);
+ lll_unlock (pd->lock, LLL_PRIVATE);
return res;
}
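
Aside, not part of the patch: the createthread.c changes above keep the "create the thread stopped" gate (now taken with an explicit LLL_PRIVATE), translate clone's ENOMEM into the EAGAIN that pthread_create is specified to return, and pass the attribute's real cpusetsize instead of sizeof (cpu_set_t). The gate pattern itself, redone as a standalone sketch with plain pthreads rather than clone() and lll_lock (names are illustrative):

    #include <pthread.h>

    static pthread_mutex_t start_gate_sketch = PTHREAD_MUTEX_INITIALIZER;

    static void *
    thread_body_sketch (void *arg)
    {
      /* Mirrors start_thread(): block on the gate the creator still holds,
         then release it and only afterwards run the user code.  */
      pthread_mutex_lock (&start_gate_sketch);
      pthread_mutex_unlock (&start_gate_sketch);
      /* ... user start routine ... */
      return arg;
    }

    static int
    create_stopped_sketch (pthread_t *out)
    {
      pthread_mutex_lock (&start_gate_sketch);   /* hold gate before creating */
      int err = pthread_create (out, NULL, thread_body_sketch, NULL);
      if (err == 0)
        {
          /* ... apply scheduling policy/affinity, report debugger events ... */
        }
      pthread_mutex_unlock (&start_gate_sketch); /* now the new thread may run */
      return err;
    }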
diff --git a/libpthread/nptl/sysdeps/pthread/librt-cancellation.c b/libpthread/nptl/sysdeps/pthread/librt-cancellation.c
index 753a2d831..ad189e81a 100644
--- a/libpthread/nptl/sysdeps/pthread/librt-cancellation.c
+++ b/libpthread/nptl/sysdeps/pthread/librt-cancellation.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -17,92 +17,9 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-#include <setjmp.h>
-#include <signal.h>
-#include <stdlib.h>
#include "pthreadP.h"
-#include "atomic.h"
-#ifdef IS_IN_librt
-/* The next two functions are similar to pthread_setcanceltype() but
- more specialized for the use in the cancelable functions like write().
- They do not need to check parameters etc. */
-int
-attribute_hidden
-__librt_enable_asynccancel (void)
-{
- struct pthread *self = THREAD_SELF;
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- int newval = oldval | CANCELTYPE_BITMASK;
-
- if (__builtin_expect ((oldval & CANCELED_BITMASK) != 0, 0))
- {
- /* If we are already exiting or if PTHREAD_CANCEL_DISABLED,
- stop right here. */
- if ((oldval & (EXITING_BITMASK | CANCELSTATE_BITMASK)) != 0)
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
- newval, oldval);
- if (__builtin_expect (curval != oldval, 0))
- {
- /* Somebody else modified the word, try again. */
- oldval = curval;
- continue;
- }
-
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
-
- __do_cancel ();
-
- /* NOTREACHED */
- }
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__builtin_expect (curval == oldval, 1))
- break;
-
- /* Prepare the next round. */
- oldval = curval;
- }
-
- return oldval;
-}
-
-
-void
-internal_function attribute_hidden
-__librt_disable_asynccancel (int oldtype)
-{
- /* If asynchronous cancellation was enabled before we do not have
- anything to do. */
- if (oldtype & CANCELTYPE_BITMASK)
- return;
-
- struct pthread *self = THREAD_SELF;
- int oldval = THREAD_GETMEM (self, cancelhandling);
-
- while (1)
- {
- int newval = oldval & ~CANCELTYPE_BITMASK;
-
- if (newval == oldval)
- break;
-
- int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
- oldval);
- if (__builtin_expect (curval == oldval, 1))
- break;
-
- /* Prepare the next round. */
- oldval = curval;
- }
-}
-
-
-#endif
+#define __pthread_enable_asynccancel __librt_enable_asynccancel
+#define __pthread_disable_asynccancel __librt_disable_asynccancel
+#include "cancellation.c"
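
Aside, not part of the patch: with the hunk above, librt stops carrying its own copy of the async-cancel helpers and instead builds the shared cancellation.c under librt-specific names. What those helpers do around a blocking call can be sketched with the public API; this stand-in uses pthread_setcanceltype, which is heavier than the internal cancelhandling bit manipulation but has the same effect.

    #include <pthread.h>
    #include <unistd.h>

    static ssize_t
    cancellable_read_sketch (int fd, void *buf, size_t n)
    {
      int oldtype;
      /* Enable asynchronous cancellation so a pending pthread_cancel can take
         effect even while the thread is blocked inside read() ...  */
      pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
      ssize_t ret = read (fd, buf, n);
      /* ... and restore the previous cancellation type once the call returns.  */
      pthread_setcanceltype (oldtype, NULL);
      return ret;
    }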
diff --git a/libpthread/nptl/sysdeps/pthread/list.h b/libpthread/nptl/sysdeps/pthread/list.h
index 43186a2d5..6ddccb9fb 100644
--- a/libpthread/nptl/sysdeps/pthread/list.h
+++ b/libpthread/nptl/sysdeps/pthread/list.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -46,24 +46,13 @@ typedef struct list_head
static inline void
list_add (list_t *newp, list_t *head)
{
- head->next->prev = newp;
newp->next = head->next;
newp->prev = head;
+ head->next->prev = newp;
head->next = newp;
}
-/* Add new element at the tail of the list. */
-static inline void
-list_add_tail (list_t *newp, list_t *head)
-{
- head->prev->next = newp;
- newp->next = head;
- newp->prev = head->prev;
- head->prev = newp;
-}
-
-
/* Remove element from list. */
static inline void
list_del (list_t *elem)
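
Aside, not part of the patch: list.h above now links a new element fully (both next and prev set) before it is published through head->next, and drops the unused list_add_tail. A small standalone usage sketch of this kind of circular list with a dummy head node; the item type is made up for the example.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct list_head_sketch { struct list_head_sketch *next, *prev; } list_sketch_t;

    static void
    list_add_sketch (list_sketch_t *newp, list_sketch_t *head)
    {
      newp->next = head->next;
      newp->prev = head;
      head->next->prev = newp;
      head->next = newp;
    }

    struct item_sketch { int id; list_sketch_t list; };

    int
    main (void)
    {
      list_sketch_t head = { &head, &head };   /* empty list points at itself */
      struct item_sketch a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

      list_add_sketch (&a.list, &head);        /* list: a */
      list_add_sketch (&b.list, &head);        /* list: b, a (adds at the front) */

      for (list_sketch_t *p = head.next; p != &head; p = p->next)
        {
          /* Recover the enclosing item from its embedded list node.  */
          struct item_sketch *it = (struct item_sketch *)
            ((char *) p - offsetof (struct item_sketch, list));
          printf ("%d\n", it->id);             /* prints 2 then 1 */
        }
      return 0;
    }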
diff --git a/libpthread/nptl/sysdeps/pthread/malloc-machine.h b/libpthread/nptl/sysdeps/pthread/malloc-machine.h
index efab230aa..e99aaa781 100644
--- a/libpthread/nptl/sysdeps/pthread/malloc-machine.h
+++ b/libpthread/nptl/sysdeps/pthread/malloc-machine.h
@@ -1,6 +1,6 @@
/* Basic platform-independent macro definitions for mutexes,
thread-specific data and parameters for malloc.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -38,13 +38,24 @@ extern void *__dso_handle __attribute__ ((__weak__));
#include <fork.h>
+#define ATFORK_MEM static struct fork_handler atfork_mem
+
#ifdef SHARED
# define thread_atfork(prepare, parent, child) \
- __register_atfork (prepare, parent, child, __dso_handle)
+ atfork_mem.prepare_handler = prepare; \
+ atfork_mem.parent_handler = parent; \
+ atfork_mem.child_handler = child; \
+ atfork_mem.dso_handle = __dso_handle; \
+ atfork_mem.refcntr = 1; \
+ __linkin_atfork (&atfork_mem)
#else
# define thread_atfork(prepare, parent, child) \
- __register_atfork (prepare, parent, child, \
- &__dso_handle == NULL ? NULL : __dso_handle)
+ atfork_mem.prepare_handler = prepare; \
+ atfork_mem.parent_handler = parent; \
+ atfork_mem.child_handler = child; \
+ atfork_mem.dso_handle = &__dso_handle == NULL ? NULL : __dso_handle; \
+ atfork_mem.refcntr = 1; \
+ __linkin_atfork (&atfork_mem)
#endif
/* thread specific data for glibc */
@@ -52,10 +63,10 @@ extern void *__dso_handle __attribute__ ((__weak__));
#include <bits/libc-tsd.h>
typedef int tsd_key_t[1]; /* no key data structure, libc magic does it */
-__libc_tsd_define (static, MALLOC) /* declaration/common definition */
+__libc_tsd_define (static, void *, MALLOC) /* declaration/common definition */
#define tsd_key_create(key, destr) ((void) (key))
-#define tsd_setspecific(key, data) __libc_tsd_set (MALLOC, (data))
-#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (MALLOC))
+#define tsd_setspecific(key, data) __libc_tsd_set (void *, MALLOC, (data))
+#define tsd_getspecific(key, vptr) ((vptr) = __libc_tsd_get (void *, MALLOC))
#include <sysdeps/generic/malloc-machine.h>
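
Aside, not part of the patch: malloc-machine.h above switches thread_atfork from calling __register_atfork (which allocates a handler record) to filling in a statically allocated struct fork_handler and linking it in with __linkin_atfork, so malloc can register its fork handlers without calling back into malloc. A standalone sketch of that pattern; the field names mirror the hunk but the implementation and all *_sketch names are illustrative.

    /* A statically allocated fork-handler record linked into a simple list;
       the real __linkin_atfork inserts it with an atomic compare-and-swap.  */
    struct fork_handler_sketch
    {
      struct fork_handler_sketch *next;
      void (*prepare_handler) (void);
      void (*parent_handler) (void);
      void (*child_handler) (void);
      void *dso_handle;
      unsigned int refcntr;
    };

    static struct fork_handler_sketch *fork_handlers_sketch;

    static void
    linkin_atfork_sketch (struct fork_handler_sketch *h)
    {
      h->next = fork_handlers_sketch;
      fork_handlers_sketch = h;
    }

    #define thread_atfork_sketch(mem, prepare, parent, child)  \
      ((mem).prepare_handler = (prepare),                       \
       (mem).parent_handler = (parent),                         \
       (mem).child_handler = (child),                           \
       (mem).refcntr = 1,                                       \
       linkin_atfork_sketch (&(mem)))

    /* Usage, mirroring ATFORK_MEM + thread_atfork in the hunk above.  */
    static void lock_all_sketch (void)   { /* take the allocator's locks */ }
    static void unlock_all_sketch (void) { /* drop them in parent and child */ }

    static struct fork_handler_sketch atfork_mem_sketch;

    static void
    register_malloc_atfork_sketch (void)
    {
      thread_atfork_sketch (atfork_mem_sketch, lock_all_sketch,
                            unlock_all_sketch, unlock_all_sketch);
    }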
diff --git a/libpthread/nptl/sysdeps/pthread/pt-initfini.c b/libpthread/nptl/sysdeps/pthread/pt-initfini.c
index 5955a7efc..b26a50456 100644
--- a/libpthread/nptl/sysdeps/pthread/pt-initfini.c
+++ b/libpthread/nptl/sysdeps/pthread/pt-initfini.c
@@ -72,7 +72,7 @@ call_initialize_minimal (void)
}
SECTION (".init");
-extern void _init (void);
+extern void __attribute__ ((section (".init"))) _init (void);
void
_init (void)
{
@@ -93,7 +93,7 @@ asm ("\n/*@_init_EPILOG_ENDS*/");
asm ("\n/*@_fini_PROLOG_BEGINS*/");
SECTION (".fini");
-extern void _fini (void);
+extern void __attribute__ ((section (".fini"))) _fini (void);
void
_fini (void)
{
diff --git a/libpthread/nptl/sysdeps/pthread/pt-longjmp.c b/libpthread/nptl/sysdeps/pthread/pt-longjmp.c
index b4106fdba..f161380ea 100644
--- a/libpthread/nptl/sysdeps/pthread/pt-longjmp.c
+++ b/libpthread/nptl/sysdeps/pthread/pt-longjmp.c
@@ -21,8 +21,6 @@
#include <stdlib.h>
#include "pthreadP.h"
-extern void __libc_longjmp (sigjmp_buf env, int val)
- __attribute__ ((noreturn));
void
longjmp (jmp_buf env, int val)
{
diff --git a/libpthread/nptl/sysdeps/pthread/pthread-functions.h b/libpthread/nptl/sysdeps/pthread/pthread-functions.h
index 813d55621..0c404fcbb 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread-functions.h
+++ b/libpthread/nptl/sysdeps/pthread/pthread-functions.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -23,6 +23,7 @@
#include <pthread.h>
#include <setjmp.h>
#include <internaltypes.h>
+#include <sysdep.h>
struct xid_command;
@@ -72,12 +73,8 @@ struct pthread_functions
int (*ptr_pthread_mutex_destroy) (pthread_mutex_t *);
int (*ptr_pthread_mutex_init) (pthread_mutex_t *,
const pthread_mutexattr_t *);
- int (*ptr_pthread_mutex_trylock) (pthread_mutex_t *);
int (*ptr_pthread_mutex_lock) (pthread_mutex_t *);
int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *);
- int (*ptr_pthread_mutexattr_init) (pthread_mutexattr_t *attr);
- int (*ptr_pthread_mutexattr_destroy) (pthread_mutexattr_t *attr);
- int (*ptr_pthread_mutexattr_settype) (pthread_mutexattr_t *attr, int kind);
pthread_t (*ptr_pthread_self) (void);
int (*ptr_pthread_setcancelstate) (int, int *);
int (*ptr_pthread_setcanceltype) (int, int *);
@@ -99,9 +96,22 @@ struct pthread_functions
__attribute ((noreturn)) __cleanup_fct_attribute;
void (*ptr__nptl_deallocate_tsd) (void);
int (*ptr__nptl_setxid) (struct xid_command *);
+ void (*ptr_freeres) (void);
};
/* Variable in libc.so. */
extern struct pthread_functions __libc_pthread_functions attribute_hidden;
+extern int __libc_pthread_functions_init attribute_hidden;
+
+#ifdef PTR_DEMANGLE
+# define PTHFCT_CALL(fct, params) \
+ ({ __typeof (__libc_pthread_functions.fct) __p; \
+ __p = __libc_pthread_functions.fct; \
+ PTR_DEMANGLE (__p); \
+ __p params; })
+#else
+# define PTHFCT_CALL(fct, params) \
+ __libc_pthread_functions.fct params
+#endif
#endif /* pthread-functions.h */
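
Aside, not part of the patch: pthread-functions.h above now demangles every function pointer fetched from __libc_pthread_functions before calling it, which pairs with the pointer_guard slot added to the TCB in the tls.h hunks earlier. The idea as a standalone sketch; the real PTR_MANGLE also rotates the value and keeps the guard in the TCB or in thread-local storage, so the XOR-only version and the names here are simplifications.

    #include <stdint.h>
    #include <stdio.h>

    /* Per-process secret, set from random bits at startup in the real code.  */
    static uintptr_t pointer_guard_sketch = 0x5bd1e995u;

    static void
    hello_sketch (void)
    {
      puts ("called through a demangled pointer");
    }

    int
    main (void)
    {
      /* Store the function pointer XORed with the guard (PTR_MANGLE); an
         attacker who overwrites the slot cannot aim it at a chosen address
         without also knowing the guard value.  */
      uintptr_t mangled = (uintptr_t) &hello_sketch ^ pointer_guard_sketch;

      /* PTR_DEMANGLE just before the call, as PTHFCT_CALL does above.  */
      void (*fn) (void) = (void (*) (void)) (mangled ^ pointer_guard_sketch);
      fn ();
      return 0;
    }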
diff --git a/libpthread/nptl/sysdeps/pthread/pthread.h b/libpthread/nptl/sysdeps/pthread/pthread.h
index c4bfc0e81..deb74309a 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread.h
+++ b/libpthread/nptl/sysdeps/pthread/pthread.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +21,7 @@
#define _PTHREAD_H 1
#include <features.h>
+#include <endian.h>
#include <sched.h>
#include <time.h>
@@ -50,7 +52,7 @@ enum
PTHREAD_MUTEX_RECURSIVE_NP,
PTHREAD_MUTEX_ERRORCHECK_NP,
PTHREAD_MUTEX_ADAPTIVE_NP
-#ifdef __USE_UNIX98
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
,
PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP,
PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP,
@@ -63,6 +65,30 @@ enum
#endif
};
+
+#ifdef __USE_XOPEN2K
+/* Robust mutex or not flags. */
+enum
+{
+ PTHREAD_MUTEX_STALLED,
+ PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED,
+ PTHREAD_MUTEX_ROBUST,
+ PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST
+};
+#endif
+
+
+#ifdef __USE_UNIX98
+/* Mutex protocols. */
+enum
+{
+ PTHREAD_PRIO_NONE,
+ PTHREAD_PRIO_INHERIT,
+ PTHREAD_PRIO_PROTECT
+};
+#endif
+
+
/* Mutex initializers. */
#if __WORDSIZE == 64
# define PTHREAD_MUTEX_INITIALIZER \
@@ -88,6 +114,7 @@ enum
# endif
#endif
+
/* Read-write lock types. */
#if defined __USE_UNIX98 || defined __USE_XOPEN2K
enum
@@ -99,21 +126,23 @@ enum
};
/* Read-write lock initializers. */
-# if __WORDSIZE == 64
-# define PTHREAD_RWLOCK_INITIALIZER \
+# define PTHREAD_RWLOCK_INITIALIZER \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
-# else
-# define PTHREAD_RWLOCK_INITIALIZER \
- { { 0, 0, 0, 0, 0, 0, 0, 0 } }
-# endif
# ifdef __USE_GNU
# if __WORDSIZE == 64
# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
- PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } }
# else
-# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
- { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, 0 } }
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+ { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, \
+ 0, 0, 0, 0 } }
+# else
+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \
+ { { 0, 0, 0, 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,\
+ 0 } }
+# endif
# endif
# endif
#endif /* Unix98 or XOpen2K */
@@ -201,7 +230,7 @@ __BEGIN_DECLS
extern int pthread_create (pthread_t *__restrict __newthread,
__const pthread_attr_t *__restrict __attr,
void *(*__start_routine) (void *),
- void *__restrict __arg) __THROW;
+ void *__restrict __arg) __THROW __nonnull ((1, 3));
/* Terminate calling thread.
@@ -251,71 +280,78 @@ extern int pthread_equal (pthread_t __thread1, pthread_t __thread2) __THROW;
/* Initialize thread attribute *ATTR with default attributes
(detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER,
no user-provided stack). */
-extern int pthread_attr_init (pthread_attr_t *__attr) __THROW;
+extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1));
/* Destroy thread attribute *ATTR. */
-extern int pthread_attr_destroy (pthread_attr_t *__attr) __THROW;
+extern int pthread_attr_destroy (pthread_attr_t *__attr)
+ __THROW __nonnull ((1));
/* Get detach state attribute. */
extern int pthread_attr_getdetachstate (__const pthread_attr_t *__attr,
- int *__detachstate) __THROW;
+ int *__detachstate)
+ __THROW __nonnull ((1, 2));
/* Set detach state attribute. */
extern int pthread_attr_setdetachstate (pthread_attr_t *__attr,
- int __detachstate) __THROW;
+ int __detachstate)
+ __THROW __nonnull ((1));
/* Get the size of the guard area created for stack overflow protection. */
extern int pthread_attr_getguardsize (__const pthread_attr_t *__attr,
- size_t *__guardsize) __THROW;
+ size_t *__guardsize)
+ __THROW __nonnull ((1, 2));
/* Set the size of the guard area created for stack overflow protection. */
extern int pthread_attr_setguardsize (pthread_attr_t *__attr,
- size_t __guardsize) __THROW;
+ size_t __guardsize)
+ __THROW __nonnull ((1));
/* Return in *PARAM the scheduling parameters of *ATTR. */
extern int pthread_attr_getschedparam (__const pthread_attr_t *__restrict
__attr,
struct sched_param *__restrict __param)
- __THROW;
+ __THROW __nonnull ((1, 2));
/* Set scheduling parameters (priority, etc) in *ATTR according to PARAM. */
extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr,
__const struct sched_param *__restrict
- __param) __THROW;
+ __param) __THROW __nonnull ((1, 2));
/* Return in *POLICY the scheduling policy of *ATTR. */
extern int pthread_attr_getschedpolicy (__const pthread_attr_t *__restrict
__attr, int *__restrict __policy)
- __THROW;
+ __THROW __nonnull ((1, 2));
/* Set scheduling policy in *ATTR according to POLICY. */
extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy)
- __THROW;
+ __THROW __nonnull ((1));
/* Return in *INHERIT the scheduling inheritance mode of *ATTR. */
extern int pthread_attr_getinheritsched (__const pthread_attr_t *__restrict
__attr, int *__restrict __inherit)
- __THROW;
+ __THROW __nonnull ((1, 2));
/* Set scheduling inheritance mode in *ATTR according to INHERIT. */
extern int pthread_attr_setinheritsched (pthread_attr_t *__attr,
- int __inherit) __THROW;
+ int __inherit)
+ __THROW __nonnull ((1));
/* Return in *SCOPE the scheduling contention scope of *ATTR. */
extern int pthread_attr_getscope (__const pthread_attr_t *__restrict __attr,
- int *__restrict __scope) __THROW;
+ int *__restrict __scope)
+ __THROW __nonnull ((1, 2));
/* Set scheduling contention scope in *ATTR according to SCOPE. */
extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope)
- __THROW;
+ __THROW __nonnull ((1));
/* Return the previously set address for the stack. */
extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
__attr, void **__restrict __stackaddr)
- __THROW __attribute_deprecated__;
+ __THROW __nonnull ((1, 2)) __attribute_deprecated__;
/* Set the starting address of the stack of the thread to be created.
Depending on whether the stack grows up or down the value must either
@@ -323,30 +359,32 @@ extern int pthread_attr_getstackaddr (__const pthread_attr_t *__restrict
minimal size of the block must be PTHREAD_STACK_MIN. */
extern int pthread_attr_setstackaddr (pthread_attr_t *__attr,
void *__stackaddr)
- __THROW __attribute_deprecated__;
+ __THROW __nonnull ((1)) __attribute_deprecated__;
/* Return the currently used minimal stack size. */
extern int pthread_attr_getstacksize (__const pthread_attr_t *__restrict
__attr, size_t *__restrict __stacksize)
- __THROW;
+ __THROW __nonnull ((1, 2));
/* Add information about the minimum stack size needed for the thread
to be started. This size must never be less than PTHREAD_STACK_MIN
and must also not exceed the system limits. */
extern int pthread_attr_setstacksize (pthread_attr_t *__attr,
- size_t __stacksize) __THROW;
+ size_t __stacksize)
+ __THROW __nonnull ((1));
#ifdef __USE_XOPEN2K
/* Return the previously set address for the stack. */
extern int pthread_attr_getstack (__const pthread_attr_t *__restrict __attr,
void **__restrict __stackaddr,
- size_t *__restrict __stacksize) __THROW;
+ size_t *__restrict __stacksize)
+ __THROW __nonnull ((1, 2, 3));
/* The following two interfaces are intended to replace the last two. They
require setting the address as well as the size since only setting the
address will make the implementation on some architectures impossible. */
extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
- size_t __stacksize) __THROW;
+ size_t __stacksize) __THROW __nonnull ((1));
#endif
#ifdef __USE_GNU
@@ -354,19 +392,22 @@ extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr,
the processors represented in CPUSET. */
extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr,
size_t __cpusetsize,
- __const cpu_set_t *__cpuset) __THROW;
+ __const cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
/* Get bit set in CPUSET representing the processors threads created with
ATTR can run on. */
extern int pthread_attr_getaffinity_np (__const pthread_attr_t *__attr,
size_t __cpusetsize,
- cpu_set_t *__cpuset) __THROW;
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((1, 3));
/* Initialize thread attribute *ATTR with attributes corresponding to the
- already running thread TH. It shall be called on unitialized ATTR
+ already running thread TH. It shall be called on uninitialized ATTR
and destroyed with pthread_attr_destroy when no longer needed. */
-extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
+extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr)
+ __THROW __nonnull ((2));
#endif
@@ -376,13 +417,13 @@ extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) __THROW;
and *PARAM. */
extern int pthread_setschedparam (pthread_t __target_thread, int __policy,
__const struct sched_param *__param)
- __THROW;
+ __THROW __nonnull ((3));
/* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */
extern int pthread_getschedparam (pthread_t __target_thread,
int *__restrict __policy,
struct sched_param *__restrict __param)
- __THROW;
+ __THROW __nonnull ((2, 3));
/* Set the scheduling priority for TARGET_THREAD. */
extern int pthread_setschedprio (pthread_t __target_thread, int __prio)
@@ -408,11 +449,13 @@ extern int pthread_yield (void) __THROW;
/* Limit specified thread TH to run only on the processors represented
in CPUSET. */
extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize,
- __const cpu_set_t *__cpuset) __THROW;
+ __const cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
/* Get bit set in CPUSET representing the processors TH can run on. */
extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
- cpu_set_t *__cpuset) __THROW;
+ cpu_set_t *__cpuset)
+ __THROW __nonnull ((3));
#endif
@@ -426,7 +469,7 @@ extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize,
   The initialization functions might throw an exception which is why
this function is not marked with __THROW. */
extern int pthread_once (pthread_once_t *__once_control,
- void (*__init_routine) (void));
+ void (*__init_routine) (void)) __nonnull ((1, 2));
/* Functions for handling cancellation.
@@ -539,9 +582,6 @@ class __pthread_cleanup_class
needed or fall back on the copy which must exist somewhere
else. */
__extern_inline void
-__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame);
-
-__extern_inline void
__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
{
if (__frame->__do_it)
@@ -603,7 +643,7 @@ __pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
void *__cancel_arg = (arg); \
- int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) \
+ int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
__cancel_buf.__cancel_jmp_buf, 0); \
if (__builtin_expect (not_first_call, 0)) \
{ \
@@ -620,6 +660,7 @@ extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
If EXECUTE is non-zero, the handler function is called. */
# define pthread_cleanup_pop(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
} while (0); \
__pthread_unregister_cancel (&__cancel_buf); \
if (execute) \
@@ -637,7 +678,7 @@ extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
void *__cancel_arg = (arg); \
- int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) \
+ int not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \
__cancel_buf.__cancel_jmp_buf, 0); \
if (__builtin_expect (not_first_call, 0)) \
{ \
@@ -655,6 +696,7 @@ extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
restores the cancellation type that was in effect when the matching
pthread_cleanup_push_defer was called. */
# define pthread_cleanup_pop_restore_np(execute) \
+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\
} while (0); \
__pthread_unregister_cancel_restore (&__cancel_buf); \
if (execute) \
@@ -666,9 +708,9 @@ extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)
/* Internal interface to initiate cleanup. */
extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
- __cleanup_fct_attribute __attribute ((__noreturn__))
+ __cleanup_fct_attribute __attribute__ ((__noreturn__))
# ifndef SHARED
- __attribute ((__weak__))
+ __attribute__ ((__weak__))
# endif
;
#endif
@@ -683,56 +725,135 @@ extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROW;
/* Initialize a mutex. */
extern int pthread_mutex_init (pthread_mutex_t *__mutex,
__const pthread_mutexattr_t *__mutexattr)
- __THROW;
+ __THROW __nonnull ((1));
/* Destroy a mutex. */
-extern int pthread_mutex_destroy (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_destroy (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
/* Try locking a mutex. */
-extern int pthread_mutex_trylock (pthread_mutex_t *_mutex) __THROW;
+extern int pthread_mutex_trylock (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
/* Lock a mutex. */
-extern int pthread_mutex_lock (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_lock (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
#ifdef __USE_XOPEN2K
/* Wait until lock becomes available, or specified time passes. */
extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex,
- __const struct timespec *__restrict
- __abstime) __THROW;
+ __const struct timespec *__restrict
+ __abstime) __THROW __nonnull ((1, 2));
#endif
/* Unlock a mutex. */
-extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) __THROW;
+extern int pthread_mutex_unlock (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+
+
+/* Get the priority ceiling of MUTEX. */
+extern int pthread_mutex_getprioceiling (__const pthread_mutex_t *
+ __restrict __mutex,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the priority ceiling of MUTEX to PRIOCEILING, return old
+ priority ceiling value in *OLD_CEILING. */
+extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
+ int __prioceiling,
+ int *__restrict __old_ceiling)
+ __THROW __nonnull ((1, 3));
+
+
+#ifdef __USE_XOPEN2K8
+/* Declare the state protected by MUTEX as consistent. */
+extern int pthread_mutex_consistent (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex)
+ __THROW __nonnull ((1));
+# endif
+#endif
/* Functions for handling mutex attributes. */
/* Initialize mutex attribute object ATTR with default attributes
(kind is PTHREAD_MUTEX_TIMED_NP). */
-extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
/* Destroy mutex attribute object ATTR. */
-extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr) __THROW;
+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr)
+ __THROW __nonnull ((1));
/* Get the process-shared flag of the mutex attribute ATTR. */
extern int pthread_mutexattr_getpshared (__const pthread_mutexattr_t *
__restrict __attr,
- int *__restrict __pshared) __THROW;
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
/* Set the process-shared flag of the mutex attribute ATTR. */
extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr,
- int __pshared) __THROW;
+ int __pshared)
+ __THROW __nonnull ((1));
-#ifdef __USE_UNIX98
+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8
/* Return in *KIND the mutex kind attribute in *ATTR. */
extern int pthread_mutexattr_gettype (__const pthread_mutexattr_t *__restrict
- __attr, int *__restrict __kind) __THROW;
+ __attr, int *__restrict __kind)
+ __THROW __nonnull ((1, 2));
/* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL,
PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or
PTHREAD_MUTEX_DEFAULT). */
extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
- __THROW;
+ __THROW __nonnull ((1));
+#endif
+
+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR. */
+extern int pthread_mutexattr_getprotocol (__const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __protocol)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either
+ PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT). */
+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr,
+ int __protocol)
+ __THROW __nonnull ((1));
+
+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR. */
+extern int pthread_mutexattr_getprioceiling (__const pthread_mutexattr_t *
+ __restrict __attr,
+ int *__restrict __prioceiling)
+ __THROW __nonnull ((1, 2));
+
+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING. */
+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr,
+ int __prioceiling)
+ __THROW __nonnull ((1));
+
+#ifdef __USE_XOPEN2K
+/* Get the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_getrobust (__const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_getrobust_np (__const pthread_mutexattr_t *__attr,
+ int *__robustness)
+ __THROW __nonnull ((1, 2));
+# endif
+
+/* Set the robustness flag of the mutex attribute ATTR. */
+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# ifdef __USE_GNU
+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr,
+ int __robustness)
+ __THROW __nonnull ((1));
+# endif
#endif
@@ -743,66 +864,77 @@ extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind)
   the default values if the latter is NULL. */
extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock,
__const pthread_rwlockattr_t *__restrict
- __attr) __THROW;
+ __attr) __THROW __nonnull ((1));
/* Destroy read-write lock RWLOCK. */
-extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
/* Acquire read lock for RWLOCK. */
-extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
/* Try to acquire read lock for RWLOCK. */
-extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
# ifdef __USE_XOPEN2K
 /* Try to acquire read lock for RWLOCK or return after specified time. */
extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock,
__const struct timespec *__restrict
- __abstime) __THROW;
+ __abstime) __THROW __nonnull ((1, 2));
# endif
/* Acquire write lock for RWLOCK. */
-extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
/* Try to acquire write lock for RWLOCK. */
-extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
# ifdef __USE_XOPEN2K
 /* Try to acquire write lock for RWLOCK or return after specified time. */
extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock,
__const struct timespec *__restrict
- __abstime) __THROW;
+ __abstime) __THROW __nonnull ((1, 2));
# endif
/* Unlock RWLOCK. */
-extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock) __THROW;
+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock)
+ __THROW __nonnull ((1));
/* Functions for handling read-write lock attributes. */
/* Initialize attribute object ATTR with default values. */
-extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
/* Destroy attribute object ATTR. */
-extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr) __THROW;
+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr)
+ __THROW __nonnull ((1));
/* Return current setting of process-shared attribute of ATTR in PSHARED. */
extern int pthread_rwlockattr_getpshared (__const pthread_rwlockattr_t *
__restrict __attr,
- int *__restrict __pshared) __THROW;
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
/* Set process-shared attribute of ATTR to PSHARED. */
extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr,
- int __pshared) __THROW;
+ int __pshared)
+ __THROW __nonnull ((1));
/* Return current setting of reader/writer preference. */
extern int pthread_rwlockattr_getkind_np (__const pthread_rwlockattr_t *
__restrict __attr,
- int *__restrict __pref) __THROW;
+ int *__restrict __pref)
+ __THROW __nonnull ((1, 2));
 /* Set reader/writer preference. */
extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
- int __pref) __THROW;
+ int __pref) __THROW __nonnull ((1));
#endif
@@ -812,16 +944,19 @@ extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
   the default values if the latter is NULL. */
extern int pthread_cond_init (pthread_cond_t *__restrict __cond,
__const pthread_condattr_t *__restrict
- __cond_attr) __THROW;
+ __cond_attr) __THROW __nonnull ((1));
/* Destroy condition variable COND. */
-extern int pthread_cond_destroy (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_destroy (pthread_cond_t *__cond)
+ __THROW __nonnull ((1));
/* Wake up one thread waiting for condition variable COND. */
-extern int pthread_cond_signal (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_signal (pthread_cond_t *__cond)
+ __THROW __nonnull ((1));
/* Wake up all threads waiting for condition variables COND. */
-extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
+extern int pthread_cond_broadcast (pthread_cond_t *__cond)
+ __THROW __nonnull ((1));
/* Wait for condition variable COND to be signaled or broadcast.
MUTEX is assumed to be locked before.
@@ -829,7 +964,8 @@ extern int pthread_cond_broadcast (pthread_cond_t *__cond) __THROW;
This function is a cancellation point and therefore not marked with
__THROW. */
extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
- pthread_mutex_t *__restrict __mutex);
+ pthread_mutex_t *__restrict __mutex)
+ __nonnull ((1, 2));
/* Wait for condition variable COND to be signaled or broadcast until
ABSTIME. MUTEX is assumed to be locked before. ABSTIME is an
@@ -841,36 +977,39 @@ extern int pthread_cond_wait (pthread_cond_t *__restrict __cond,
extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond,
pthread_mutex_t *__restrict __mutex,
__const struct timespec *__restrict
- __abstime);
+ __abstime) __nonnull ((1, 2, 3));
/* Functions for handling condition variable attributes. */
/* Initialize condition variable attribute ATTR. */
-extern int pthread_condattr_init (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_init (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
/* Destroy condition variable attribute ATTR. */
-extern int pthread_condattr_destroy (pthread_condattr_t *__attr) __THROW;
+extern int pthread_condattr_destroy (pthread_condattr_t *__attr)
+ __THROW __nonnull ((1));
/* Get the process-shared flag of the condition variable attribute ATTR. */
extern int pthread_condattr_getpshared (__const pthread_condattr_t *
- __restrict __attr,
- int *__restrict __pshared) __THROW;
+ __restrict __attr,
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
/* Set the process-shared flag of the condition variable attribute ATTR. */
extern int pthread_condattr_setpshared (pthread_condattr_t *__attr,
- int __pshared) __THROW;
+ int __pshared) __THROW __nonnull ((1));
#ifdef __USE_XOPEN2K
 /* Get the clock selected for the condition variable attribute ATTR. */
extern int pthread_condattr_getclock (__const pthread_condattr_t *
__restrict __attr,
__clockid_t *__restrict __clock_id)
- __THROW;
+ __THROW __nonnull ((1, 2));
 /* Set the clock selected for the condition variable attribute ATTR. */
extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
- __clockid_t __clock_id) __THROW;
-
+ __clockid_t __clock_id)
+ __THROW __nonnull ((1));
#endif
@@ -880,19 +1019,23 @@ extern int pthread_condattr_setclock (pthread_condattr_t *__attr,
/* Initialize the spinlock LOCK. If PSHARED is nonzero the spinlock can
be shared between different processes. */
extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared)
- __THROW;
+ __THROW __nonnull ((1));
/* Destroy the spinlock LOCK. */
-extern int pthread_spin_destroy (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
 /* Wait until spinlock LOCK is acquired. */
-extern int pthread_spin_lock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_lock (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
/* Try to lock spinlock LOCK. */
-extern int pthread_spin_trylock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
/* Release spinlock LOCK. */
-extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock)
+ __THROW __nonnull ((1));
/* Functions to handle barriers. */
@@ -901,29 +1044,36 @@ extern int pthread_spin_unlock (pthread_spinlock_t *__lock) __THROW;
    opened when COUNT waiters have arrived. */
extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier,
__const pthread_barrierattr_t *__restrict
- __attr, unsigned int __count) __THROW;
+ __attr, unsigned int __count)
+ __THROW __nonnull ((1));
/* Destroy a previously dynamically initialized barrier BARRIER. */
-extern int pthread_barrier_destroy (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier)
+ __THROW __nonnull ((1));
/* Wait on barrier BARRIER. */
-extern int pthread_barrier_wait (pthread_barrier_t *__barrier) __THROW;
+extern int pthread_barrier_wait (pthread_barrier_t *__barrier)
+ __THROW __nonnull ((1));
/* Initialize barrier attribute ATTR. */
-extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
/* Destroy previously dynamically initialized barrier attribute ATTR. */
-extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr) __THROW;
+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr)
+ __THROW __nonnull ((1));
/* Get the process-shared flag of the barrier attribute ATTR. */
extern int pthread_barrierattr_getpshared (__const pthread_barrierattr_t *
__restrict __attr,
- int *__restrict __pshared) __THROW;
+ int *__restrict __pshared)
+ __THROW __nonnull ((1, 2));
/* Set the process-shared flag of the barrier attribute ATTR. */
extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
- int __pshared) __THROW;
+ int __pshared)
+ __THROW __nonnull ((1));
#endif
@@ -936,7 +1086,8 @@ extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr,
DESTR_FUNCTION is not called if the value associated is NULL when
the key is destroyed. */
extern int pthread_key_create (pthread_key_t *__key,
- void (*__destr_function) (void *)) __THROW;
+ void (*__destr_function) (void *))
+ __THROW __nonnull ((1));
/* Destroy KEY. */
extern int pthread_key_delete (pthread_key_t __key) __THROW;
@@ -946,13 +1097,14 @@ extern void *pthread_getspecific (pthread_key_t __key) __THROW;
/* Store POINTER in the thread-specific data slot identified by KEY. */
extern int pthread_setspecific (pthread_key_t __key,
- __const void *__pointer) __THROW;
+ __const void *__pointer) __THROW ;
#ifdef __USE_XOPEN2K
/* Get ID of CPU-time clock for thread THREAD_ID. */
extern int pthread_getcpuclockid (pthread_t __thread_id,
- __clockid_t *__clock_id) __THROW;
+ __clockid_t *__clock_id)
+ __THROW __nonnull ((2));
#endif
@@ -971,6 +1123,16 @@ extern int pthread_atfork (void (*__prepare) (void),
void (*__parent) (void),
void (*__child) (void)) __THROW;
+
+#ifdef __USE_EXTERN_INLINES
+/* Optimizations. */
+__extern_inline int
+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2))
+{
+ return __thread1 == __thread2;
+}
+#endif
+
__END_DECLS
#endif /* pthread.h */
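
Most of the pthread.h changes above annotate existing declarations with __nonnull ((...)), which on GCC-compatible compilers expands to __attribute__ ((__nonnull__ (...))) and lets -Wnonnull (enabled by -Wall) flag literal NULL arguments at compile time. A minimal sketch of the effect, assuming a GCC-style toolchain (the exact diagnostic wording varies):

/* nonnull-demo.c -- build with:  gcc -Wall -c nonnull-demo.c  */
#include <pthread.h>
#include <stddef.h>

int trip_the_warning (void)
{
  /* With the annotated header GCC reports something like
     "null argument where non-null required (argument 1)".  */
  return pthread_mutex_lock (NULL);
}

The attribute is only a compile-time hint; NULL smuggled through a variable the compiler cannot see through is not diagnosed, and run-time behaviour is unchanged.
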
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c b/libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c
index d21ed79b1..d1135391e 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_barrier_wait.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -25,13 +25,14 @@
/* Wait on barrier. */
int
-pthread_barrier_wait (pthread_barrier_t *barrier)
+pthread_barrier_wait (
+ pthread_barrier_t *barrier)
{
struct pthread_barrier *ibarrier = (struct pthread_barrier *) barrier;
int result = 0;
/* Make sure we are alone. */
- lll_lock (ibarrier->lock);
+ lll_lock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* One more arrival. */
--ibarrier->left;
@@ -44,7 +45,8 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
++ibarrier->curr_event;
/* Wake up everybody. */
- lll_futex_wake (&ibarrier->curr_event, INT_MAX);
+ lll_futex_wake (&ibarrier->curr_event, INT_MAX,
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* This is the thread which finished the serialization. */
result = PTHREAD_BARRIER_SERIAL_THREAD;
@@ -56,11 +58,12 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
unsigned int event = ibarrier->curr_event;
/* Before suspending, make the barrier available to others. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
/* Wait for the event counter of the barrier to change. */
do
- lll_futex_wait (&ibarrier->curr_event, event);
+ lll_futex_wait (&ibarrier->curr_event, event,
+ ibarrier->private ^ FUTEX_PRIVATE_FLAG);
while (event == ibarrier->curr_event);
}
@@ -70,7 +73,7 @@ pthread_barrier_wait (pthread_barrier_t *barrier)
/* If this was the last woken thread, unlock. */
if (atomic_increment_val (&ibarrier->left) == init_count)
/* We are done. */
- lll_unlock (ibarrier->lock);
+ lll_unlock (ibarrier->lock, ibarrier->private ^ FUTEX_PRIVATE_FLAG);
return result;
}
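
On the caller's side, the PTHREAD_BARRIER_SERIAL_THREAD result that this function hands to exactly one waiter per round is the standard way to elect a post-barrier worker. A minimal usage sketch, independent of the futex details above:

/* barrier-demo.c -- build with:  gcc -Wall -pthread barrier-demo.c  */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

static pthread_barrier_t barrier;

static void *worker (void *arg)
{
  int rc = pthread_barrier_wait (&barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    printf ("thread %ld finished the serialization\n", (long) arg);
  else if (rc != 0)
    printf ("thread %ld: barrier error %d\n", (long) arg, rc);
  return NULL;
}

int main (void)
{
  pthread_t th[NTHREADS];
  pthread_barrier_init (&barrier, NULL, NTHREADS);
  for (long i = 0; i < NTHREADS; ++i)
    pthread_create (&th[i], NULL, worker, (void *) i);
  for (int i = 0; i < NTHREADS; ++i)
    pthread_join (th[i], NULL);
  pthread_barrier_destroy (&barrier);
  return 0;
}
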
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c b/libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c
index f6e83ed3f..5e7465774 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_cond_broadcast.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -23,14 +23,18 @@
#include <lowlevellock.h>
#include <pthread.h>
#include <pthreadP.h>
+
#include <bits/kernel-features.h>
int
-__pthread_cond_broadcast (pthread_cond_t *cond)
+__pthread_cond_broadcast (
+ pthread_cond_t *cond)
{
+ int pshared = (cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -44,7 +48,7 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
++cond->__data.__broadcast_seq;
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
/* Do not use requeue for pshared condvars. */
if (cond->__data.__mutex == (void *) ~0l)
@@ -52,15 +56,24 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
/* Wake everybody. */
pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex;
+
+ /* XXX: Kernel so far doesn't support requeue to PI futex. */
+ /* XXX: Kernel so far can only requeue to the same type of futex,
+ in this case private (we don't requeue for pshared condvars). */
+ if (__builtin_expect (mut->__data.__kind
+ & (PTHREAD_MUTEX_PRIO_INHERIT_NP
+ | PTHREAD_MUTEX_PSHARED_BIT), 0))
+ goto wake_all;
+
/* lll_futex_requeue returns 0 for success and non-zero
for errors. */
if (__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1,
INT_MAX, &mut->__data.__lock,
- futex_val), 0))
+ futex_val, LLL_PRIVATE), 0))
{
/* The requeue functionality is not available. */
wake_all:
- lll_futex_wake (&cond->__data.__futex, INT_MAX);
+ lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared);
}
/* That's all. */
@@ -68,8 +81,9 @@ __pthread_cond_broadcast (pthread_cond_t *cond)
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
return 0;
}
+
weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
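
The pshared test at the top (the (void *) ~0l sentinel recorded in __data.__mutex) and the "do not requeue for pshared condvars" rule both concern condition variables created with the process-shared attribute. From the API side that case is set up as in the sketch below; placing the objects in MAP_SHARED memory is left out for brevity.

/* pshared-cond.c -- gcc -Wall -pthread pshared-cond.c  */
#include <pthread.h>

int main (void)
{
  pthread_condattr_t ca;
  pthread_cond_t cond;

  pthread_condattr_init (&ca);
  pthread_condattr_setpshared (&ca, PTHREAD_PROCESS_SHARED);
  pthread_cond_init (&cond, &ca);   /* takes the LLL_SHARED / wake-all path */

  /* ... in real use, cond and its mutex live in mmap(MAP_SHARED)
     memory visible to a second process ...  */

  pthread_cond_destroy (&cond);
  pthread_condattr_destroy (&ca);
  return 0;
}
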
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c b/libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c
index 5091bea87..d66f3edbb 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_cond_signal.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -23,14 +23,19 @@
#include <lowlevellock.h>
#include <pthread.h>
#include <pthreadP.h>
+
#include <bits/kernel-features.h>
int
-__pthread_cond_signal (pthread_cond_t *cond)
+__pthread_cond_signal (
+ pthread_cond_t *cond)
{
+ int pshared = (cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
+
/* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* Are there any waiters to be woken? */
if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
@@ -40,12 +45,18 @@ __pthread_cond_signal (pthread_cond_t *cond)
++cond->__data.__futex;
/* Wake one. */
- lll_futex_wake (&cond->__data.__futex, 1);
+ if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, 1,
+ 1, &cond->__data.__lock,
+ pshared), 0))
+ return 0;
+
+ lll_futex_wake (&cond->__data.__futex, 1, pshared);
}
/* We are done. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
return 0;
}
+
weak_alias(__pthread_cond_signal, pthread_cond_signal)
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c b/libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c
index 1f4136e70..4aaf5df75 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_cond_timedwait.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -23,6 +23,7 @@
#include <lowlevellock.h>
#include <pthread.h>
#include <pthreadP.h>
+#include <bits/kernel-features.h>
/* Cleanup handler, defined in pthread_cond_wait.c. */
@@ -51,21 +52,24 @@ __pthread_cond_timedwait (
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
+ int pshared = (cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
+
   /* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* Now we can release the mutex. */
int err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (err)
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
return err;
}
/* We have one new user of the condvar. */
++cond->__data.__total_seq;
++cond->__data.__futex;
- cond->__data.__nwaiters += 1 << COND_CLOCK_BITS;
+ cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
/* Remember the mutex we are using here. If there is already a
      different address stored, this is a bad user bug. Do not store
@@ -98,7 +102,7 @@ __pthread_cond_timedwait (
int ret;
ret = INTERNAL_SYSCALL (clock_gettime, err, 2,
(cond->__data.__nwaiters
- & ((1 << COND_CLOCK_BITS) - 1)),
+ & ((1 << COND_NWAITERS_SHIFT) - 1)),
&rt);
# ifndef __ASSUME_POSIX_TIMERS
if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (ret, err), 0))
@@ -144,20 +148,20 @@ __pthread_cond_timedwait (
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
/* Wait until woken by signal or broadcast. */
err = lll_futex_timed_wait (&cond->__data.__futex,
- futex_val, &rt);
+ futex_val, &rt, pshared);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock(cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -187,17 +191,17 @@ __pthread_cond_timedwait (
bc_out:
- cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+ cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
/* If pthread_cond_destroy was called on this variable already,
notify the pthread_cond_destroy caller all waiters have left
and it can be successfully destroyed. */
if (cond->__data.__total_seq == -1ULL
- && cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
- lll_futex_wake (&cond->__data.__nwaiters, 1);
+ && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+ lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
@@ -207,4 +211,5 @@ __pthread_cond_timedwait (
return err ?: result;
}
+
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
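
The clock id this hunk extracts from the low bits of __nwaiters is the one selected with pthread_condattr_setclock, and the caller has to express the timeout as an absolute time on that same clock. A usage sketch (older C libraries may need -lrt for clock_gettime):

/* timedwait-deadline.c -- gcc -Wall -pthread timedwait-deadline.c  */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond;
static int ready;

static int wait_up_to_two_seconds (void)
{
  struct timespec deadline;
  clock_gettime (CLOCK_MONOTONIC, &deadline);   /* same clock as below */
  deadline.tv_sec += 2;

  pthread_mutex_lock (&lock);
  int err = 0;
  while (!ready && err != ETIMEDOUT)
    err = pthread_cond_timedwait (&cond, &lock, &deadline);
  pthread_mutex_unlock (&lock);
  return err;
}

int main (void)
{
  pthread_condattr_t ca;
  pthread_condattr_init (&ca);
  pthread_condattr_setclock (&ca, CLOCK_MONOTONIC);
  pthread_cond_init (&cond, &ca);
  pthread_condattr_destroy (&ca);

  return wait_up_to_two_seconds () == ETIMEDOUT ? 0 : 1;
}
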
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c b/libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c
index 79245b7c1..2fac02d83 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_cond_wait.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -41,38 +41,46 @@ __condvar_cleanup (void *arg)
struct _condvar_cleanup_buffer *cbuffer =
(struct _condvar_cleanup_buffer *) arg;
unsigned int destroying;
+ int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
/* We are going to modify shared data. */
- lll_mutex_lock (cbuffer->cond->__data.__lock);
+ lll_lock (cbuffer->cond->__data.__lock, pshared);
if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq)
{
/* This thread is not waiting anymore. Adjust the sequence counters
- appropriately. */
- ++cbuffer->cond->__data.__wakeup_seq;
+ appropriately. We do not increment WAKEUP_SEQ if this would
+ bump it over the value of TOTAL_SEQ. This can happen if a thread
+ was woken and then canceled. */
+ if (cbuffer->cond->__data.__wakeup_seq
+ < cbuffer->cond->__data.__total_seq)
+ {
+ ++cbuffer->cond->__data.__wakeup_seq;
+ ++cbuffer->cond->__data.__futex;
+ }
++cbuffer->cond->__data.__woken_seq;
- ++cbuffer->cond->__data.__futex;
}
- cbuffer->cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+ cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
/* If pthread_cond_destroy was called on this variable already,
notify the pthread_cond_destroy caller all waiters have left
and it can be successfully destroyed. */
destroying = 0;
if (cbuffer->cond->__data.__total_seq == -1ULL
- && cbuffer->cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
+ && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
{
- lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1);
+ lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared);
destroying = 1;
}
/* We are done. */
- lll_mutex_unlock (cbuffer->cond->__data.__lock);
+ lll_unlock (cbuffer->cond->__data.__lock, pshared);
/* Wake everybody to make sure no condvar signal gets lost. */
if (! destroying)
- lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX);
+ lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared);
/* Get the mutex before returning unless asynchronous cancellation
is in effect. */
@@ -88,22 +96,24 @@ __pthread_cond_wait (
struct _pthread_cleanup_buffer buffer;
struct _condvar_cleanup_buffer cbuffer;
int err;
+ int pshared = (cond->__data.__mutex == (void *) ~0l)
+ ? LLL_SHARED : LLL_PRIVATE;
   /* Make sure we are alone. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* Now we can release the mutex. */
err = __pthread_mutex_unlock_usercnt (mutex, 0);
if (__builtin_expect (err, 0))
{
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
return err;
}
/* We have one new user of the condvar. */
++cond->__data.__total_seq;
++cond->__data.__futex;
- cond->__data.__nwaiters += 1 << COND_CLOCK_BITS;
+ cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT;
/* Remember the mutex we are using here. If there is already a
      different address stored, this is a bad user bug. Do not store
@@ -132,19 +142,19 @@ __pthread_cond_wait (
unsigned int futex_val = cond->__data.__futex;
/* Prepare to wait. Release the condvar futex. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
/* Enable asynchronous cancellation. Required by the standard. */
cbuffer.oldtype = __pthread_enable_asynccancel ();
/* Wait until woken by signal or broadcast. */
- lll_futex_wait (&cond->__data.__futex, futex_val);
+ lll_futex_wait (&cond->__data.__futex, futex_val, pshared);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (cbuffer.oldtype);
/* We are going to look at shared data again, so get the lock. */
- lll_mutex_lock (cond->__data.__lock);
+ lll_lock (cond->__data.__lock, pshared);
/* If a broadcast happened, we are done. */
if (cbuffer.bc_seq != cond->__data.__broadcast_seq)
@@ -160,17 +170,17 @@ __pthread_cond_wait (
bc_out:
- cond->__data.__nwaiters -= 1 << COND_CLOCK_BITS;
+ cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT;
   /* If pthread_cond_destroy was called on this variable already,
notify the pthread_cond_destroy caller all waiters have left
and it can be successfully destroyed. */
if (cond->__data.__total_seq == -1ULL
- && cond->__data.__nwaiters < (1 << COND_CLOCK_BITS))
- lll_futex_wake (&cond->__data.__nwaiters, 1);
+ && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT))
+ lll_futex_wake (&cond->__data.__nwaiters, 1, pshared);
/* We are done with the condvar. */
- lll_mutex_unlock (cond->__data.__lock);
+ lll_unlock (cond->__data.__lock, pshared);
/* The cancellation handling is back to normal, remove the handler. */
__pthread_cleanup_pop (&buffer, 0);
@@ -178,4 +188,5 @@ __pthread_cond_wait (
/* Get the mutex before returning. */
return __pthread_mutex_cond_lock (mutex);
}
+
weak_alias(__pthread_cond_wait, pthread_cond_wait)
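
The implementation above assumes the caller holds MUTEX on entry, drops it only while sleeping on the futex, and re-acquires it before returning; spurious and stolen wakeups are possible, which is why the caller-side contract is a predicate loop:

/* cond-usage.c -- gcc -Wall -pthread -c cond-usage.c  */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int available;

void consume_one (void)
{
  pthread_mutex_lock (&lock);
  while (available == 0)               /* re-check after every wakeup */
    pthread_cond_wait (&cond, &lock);  /* unlocks lock while sleeping */
  --available;
  pthread_mutex_unlock (&lock);
}

void produce_one (void)
{
  pthread_mutex_lock (&lock);
  ++available;
  pthread_cond_signal (&cond);
  pthread_mutex_unlock (&lock);
}
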
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_once.c b/libpthread/nptl/sysdeps/pthread/pthread_once.c
index fc16bc535..57bb6b977 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_once.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_once.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -22,7 +22,7 @@
-static lll_lock_t once_lock = LLL_LOCK_INITIALIZER;
+static int once_lock = LLL_LOCK_INITIALIZER;
int
@@ -35,7 +35,7 @@ __pthread_once (
object. */
if (*once_control == PTHREAD_ONCE_INIT)
{
- lll_lock (once_lock);
+ lll_lock (once_lock, LLL_PRIVATE);
/* XXX This implementation is not complete. It doesn't take
cancelation and fork into account. */
@@ -46,7 +46,7 @@ __pthread_once (
*once_control = !PTHREAD_ONCE_INIT;
}
- lll_unlock (once_lock);
+ lll_unlock (once_lock, LLL_PRIVATE);
}
return 0;
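
For reference, the caller-side pattern this simplified implementation serves (the comment above notes it does not yet handle cancellation or fork):

/* once-demo.c -- gcc -Wall -pthread once-demo.c  */
#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void do_init (void)
{
  puts ("initialized exactly once");
}

void init_subsystem (void)
{
  pthread_once (&once, do_init);   /* safe from any number of threads */
}

int main (void)
{
  init_subsystem ();
  init_subsystem ();
  return 0;
}
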
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c
index 2fdcc49f9..dc00f2a08 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_rdlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -26,12 +26,13 @@
/* Acquire read lock for RWLOCK. */
int
-__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_rdlock (
+ pthread_rwlock_t *rwlock)
{
int result = 0;
   /* Make sure we are alone. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -39,7 +40,7 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
if (rwlock->__data.__writer == 0
/* ...and if either no writer is waiting or we prefer readers. */
&& (!rwlock->__data.__nr_writers_queued
- || rwlock->__data.__flags == 0))
+ || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
{
/* Increment the reader counter. Avoid overflow. */
if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
@@ -73,19 +74,20 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
- lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval);
+ lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
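
The reader-admission test now goes through PTHREAD_RWLOCK_PREFER_READER_P instead of comparing __flags with 0 directly; the preference itself is chosen through the GNU kind attribute. A sketch of the writer-preferring configuration, assuming _GNU_SOURCE:

/* rwlock-pref.c -- gcc -Wall -pthread rwlock-pref.c  */
#define _GNU_SOURCE
#include <pthread.h>

int main (void)
{
  pthread_rwlockattr_t ra;
  pthread_rwlock_t rw;

  pthread_rwlockattr_init (&ra);
  /* Queued writers now block new readers in the loop above.  */
  pthread_rwlockattr_setkind_np (&ra,
                                 PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  pthread_rwlock_init (&rw, &ra);

  pthread_rwlock_rdlock (&rw);
  pthread_rwlock_unlock (&rw);

  pthread_rwlock_destroy (&rw);
  pthread_rwlockattr_destroy (&ra);
  return 0;
}
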
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c
index 8503788c2..3daefc71f 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedrdlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -33,7 +33,7 @@ pthread_rwlock_timedrdlock (
int result = 0;
   /* Make sure we are alone. */
- lll_mutex_lock(rwlock->__data.__lock);
+ lll_lock(rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -43,7 +43,7 @@ pthread_rwlock_timedrdlock (
if (rwlock->__data.__writer == 0
/* ...and if either no writer is waiting or we prefer readers. */
&& (!rwlock->__data.__nr_writers_queued
- || rwlock->__data.__flags == 0))
+ || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
{
/* Increment the reader counter. Avoid overflow. */
if (++rwlock->__data.__nr_readers == 0)
@@ -110,14 +110,14 @@ pthread_rwlock_timedrdlock (
int waitval = rwlock->__data.__readers_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__readers_wakeup,
- waitval, &rt);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
--rwlock->__data.__nr_readers_queued;
@@ -131,7 +131,7 @@ pthread_rwlock_timedrdlock (
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c
index d9caa85bb..e6fcb1640 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_timedwrlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -33,7 +33,7 @@ pthread_rwlock_timedwrlock (
int result = 0;
   /* Make sure we are alone. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -100,14 +100,14 @@ pthread_rwlock_timedwrlock (
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
err = lll_futex_timed_wait (&rwlock->__data.__writer_wakeup,
- waitval, &rt);
+ waitval, &rt, rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
@@ -121,7 +121,7 @@ pthread_rwlock_timedwrlock (
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c
index 9cae8b6c2..a7ef71a11 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_unlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -27,7 +27,7 @@
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
if (rwlock->__data.__writer)
rwlock->__data.__writer = 0;
else
@@ -37,19 +37,21 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
if (rwlock->__data.__nr_writers_queued)
{
++rwlock->__data.__writer_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
- lll_futex_wake (&rwlock->__data.__writer_wakeup, 1);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
+ rwlock->__data.__shared);
return 0;
}
else if (rwlock->__data.__nr_readers_queued)
{
++rwlock->__data.__readers_wakeup;
- lll_mutex_unlock (rwlock->__data.__lock);
- lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
+ lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
+ rwlock->__data.__shared);
return 0;
}
}
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return 0;
}
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c
index 1b9186fb8..81e6daa56 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_rwlock_wrlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
@@ -26,12 +26,13 @@
/* Acquire write lock for RWLOCK. */
int
-__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
+__pthread_rwlock_wrlock (
+ pthread_rwlock_t *rwlock)
{
int result = 0;
   /* Make sure we are alone. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
while (1)
{
@@ -64,20 +65,21 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
int waitval = rwlock->__data.__writer_wakeup;
/* Free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
/* Wait for the writer or reader(s) to finish. */
- lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval);
+ lll_futex_wait (&rwlock->__data.__writer_wakeup, waitval,
+ rwlock->__data.__shared);
/* Get the lock. */
- lll_mutex_lock (rwlock->__data.__lock);
+ lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
/* To start over again, remove the thread from the writer list. */
--rwlock->__data.__nr_writers_queued;
}
/* We are done, free the lock. */
- lll_mutex_unlock (rwlock->__data.__lock);
+ lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
return result;
}
diff --git a/libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c b/libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c
index 62082f44e..7118f8a01 100644
--- a/libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c
+++ b/libpthread/nptl/sysdeps/pthread/pthread_spin_destroy.c
@@ -21,7 +21,8 @@
int
-pthread_spin_destroy (pthread_spinlock_t *lock)
+pthread_spin_destroy (
+ pthread_spinlock_t *lock)
{
/* Nothing to do. */
return 0;
diff --git a/libpthread/nptl/sysdeps/pthread/setxid.h b/libpthread/nptl/sysdeps/pthread/setxid.h
index 8ec382f40..aebdbd236 100644
--- a/libpthread/nptl/sysdeps/pthread/setxid.h
+++ b/libpthread/nptl/sysdeps/pthread/setxid.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -33,13 +33,12 @@
# define INLINE_SETXID_SYSCALL(name, nr, args...) \
({ \
int __result; \
- if (__builtin_expect (__libc_pthread_functions.ptr__nptl_setxid \
- != NULL, 0)) \
+ if (__builtin_expect (__libc_pthread_functions_init, 0)) \
{ \
struct xid_command __cmd; \
__cmd.syscall_no = __NR_##name; \
__SETXID_##nr (__cmd, args); \
- __result = __libc_pthread_functions.ptr__nptl_setxid (&__cmd); \
+ __result = PTHFCT_CALL (ptr__nptl_setxid, (&__cmd)); \
} \
else \
__result = INLINE_SYSCALL (name, nr, args); \
diff --git a/libpthread/nptl/sysdeps/pthread/sigaction.c b/libpthread/nptl/sysdeps/pthread/sigaction.c
index 54b5d2de4..20cff8941 100644
--- a/libpthread/nptl/sysdeps/pthread/sigaction.c
+++ b/libpthread/nptl/sysdeps/pthread/sigaction.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -17,22 +17,18 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-/* This is tricky. GCC doesn't like #include_next in the primary
- source file and even if it did, the first #include_next is this
- exact file anyway. */
#ifndef LIBC_SIGACTION
#include <pthreadP.h>
/* We use the libc implementation but we tell it to not allow
SIGCANCEL or SIGTIMER to be handled. */
-# define LIBC_SIGACTION 1
-
-# include <sigaction.c>
+#define LIBC_SIGACTION 1
+#include <sigaction.c>
int
sigaction (int sig, const struct sigaction *act, struct sigaction *oact);
-
+
int
__sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
{
@@ -47,6 +43,7 @@ __sigaction (int sig, const struct sigaction *act, struct sigaction *oact)
libc_hidden_proto(sigaction)
weak_alias (__sigaction, sigaction)
libc_hidden_weak(sigaction)
+
#else
# include_next <sigaction.c>
diff --git a/libpthread/nptl/sysdeps/pthread/sigfillset.c b/libpthread/nptl/sysdeps/pthread/sigfillset.c
index fbe458f55..eed75e237 100644
--- a/libpthread/nptl/sysdeps/pthread/sigfillset.c
+++ b/libpthread/nptl/sysdeps/pthread/sigfillset.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/libpthread/nptl/sysdeps/pthread/tpp.c b/libpthread/nptl/sysdeps/pthread/tpp.c
new file mode 100644
index 000000000..0325010b4
--- /dev/null
+++ b/libpthread/nptl/sysdeps/pthread/tpp.c
@@ -0,0 +1,172 @@
+/* Thread Priority Protect helpers.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2006.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <pthreadP.h>
+#include <sched.h>
+#include <stdlib.h>
+
+
+int __sched_fifo_min_prio = -1;
+int __sched_fifo_max_prio = -1;
+
+void
+__init_sched_fifo_prio (void)
+{
+ __sched_fifo_max_prio = sched_get_priority_max (SCHED_FIFO);
+ atomic_write_barrier ();
+ __sched_fifo_min_prio = sched_get_priority_min (SCHED_FIFO);
+}
+
+int
+__pthread_tpp_change_priority (int previous_prio, int new_prio)
+{
+ struct pthread *self = THREAD_SELF;
+ struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
+
+ if (tpp == NULL)
+ {
+ if (__sched_fifo_min_prio == -1)
+ __init_sched_fifo_prio ();
+
+ size_t size = sizeof *tpp;
+ size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
+ * sizeof (tpp->priomap[0]);
+ tpp = calloc (size, 1);
+ if (tpp == NULL)
+ return ENOMEM;
+ tpp->priomax = __sched_fifo_min_prio - 1;
+ THREAD_SETMEM (self, tpp, tpp);
+ }
+
+ assert (new_prio == -1
+ || (new_prio >= __sched_fifo_min_prio
+ && new_prio <= __sched_fifo_max_prio));
+ assert (previous_prio == -1
+ || (previous_prio >= __sched_fifo_min_prio
+ && previous_prio <= __sched_fifo_max_prio));
+
+ int priomax = tpp->priomax;
+ int newpriomax = priomax;
+ if (new_prio != -1)
+ {
+ if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
+ return EAGAIN;
+ ++tpp->priomap[new_prio - __sched_fifo_min_prio];
+ if (new_prio > priomax)
+ newpriomax = new_prio;
+ }
+
+ if (previous_prio != -1)
+ {
+ if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
+ && priomax == previous_prio
+ && previous_prio > new_prio)
+ {
+ int i;
+ for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
+ if (tpp->priomap[i - __sched_fifo_min_prio])
+ break;
+ newpriomax = i;
+ }
+ }
+
+ if (priomax == newpriomax)
+ return 0;
+
+ lll_lock (self->lock, LLL_PRIVATE);
+
+ tpp->priomax = newpriomax;
+
+ int result = 0;
+
+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+ {
+ if (__sched_getparam (self->tid, &self->schedparam) != 0)
+ result = errno;
+ else
+ self->flags |= ATTR_FLAG_SCHED_SET;
+ }
+
+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+ {
+ self->schedpolicy = __sched_getscheduler (self->tid);
+ if (self->schedpolicy == -1)
+ result = errno;
+ else
+ self->flags |= ATTR_FLAG_POLICY_SET;
+ }
+
+ if (result == 0)
+ {
+ struct sched_param sp = self->schedparam;
+ if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
+ {
+ if (sp.sched_priority < newpriomax)
+ sp.sched_priority = newpriomax;
+
+ if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
+ result = errno;
+ }
+ }
+
+ lll_unlock (self->lock, LLL_PRIVATE);
+
+ return result;
+}
+
+int
+__pthread_current_priority (void)
+{
+ struct pthread *self = THREAD_SELF;
+ if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+ == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
+ return self->schedparam.sched_priority;
+
+ int result = 0;
+
+ lll_lock (self->lock, LLL_PRIVATE);
+
+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
+ {
+ if (__sched_getparam (self->tid, &self->schedparam) != 0)
+ result = -1;
+ else
+ self->flags |= ATTR_FLAG_SCHED_SET;
+ }
+
+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
+ {
+ self->schedpolicy = __sched_getscheduler (self->tid);
+ if (self->schedpolicy == -1)
+ result = -1;
+ else
+ self->flags |= ATTR_FLAG_POLICY_SET;
+ }
+
+ if (result != -1)
+ result = self->schedparam.sched_priority;
+
+ lll_unlock (self->lock, LLL_PRIVATE);
+
+ return result;
+}
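
__pthread_tpp_change_priority is reached from the lock and unlock paths of mutexes whose protocol is PTHREAD_PRIO_PROTECT: locking passes the mutex's ceiling as new_prio, unlocking passes -1, and priomap[] counts how many held mutexes demand each ceiling so priomax can drop back correctly. A caller-side sketch of that path, assuming permission to use SCHED_FIFO (root or a suitable RLIMIT_RTPRIO):

/* tpp-demo.c -- gcc -Wall -pthread tpp-demo.c  */
#define _XOPEN_SOURCE 700   /* for pthread_mutexattr_setprotocol/-prioceiling */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main (void)
{
  struct sched_param sp = { .sched_priority = 5 };
  int rc = pthread_setschedparam (pthread_self (), SCHED_FIFO, &sp);
  if (rc != 0)
    {
      fprintf (stderr, "need SCHED_FIFO rights: %s\n", strerror (rc));
      return 1;
    }

  pthread_mutexattr_t ma;
  pthread_mutex_t m;
  pthread_mutexattr_init (&ma);
  pthread_mutexattr_setprotocol (&ma, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&ma, 10);
  pthread_mutex_init (&m, &ma);

  pthread_mutex_lock (&m);      /* boosted to the ceiling (10) here */
  pthread_mutex_unlock (&m);    /* dropped back to priority 5 */

  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&ma);
  return 0;
}
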
diff --git a/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
index 8b1f24407..e058604f3 100644
--- a/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
+++ b/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2005, 2006, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>.
@@ -21,10 +21,10 @@
#include <stdio.h>
#include <unwind.h>
#include <pthreadP.h>
+#include <sysdep.h>
+#include <libgcc_s.h>
-#define __libc_dlopen(x) dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym dlsym
-
+static void *libgcc_s_handle;
static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
static _Unwind_Reason_Code (*libgcc_s_personality)
(int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
@@ -34,15 +34,23 @@ static _Unwind_Reason_Code (*libgcc_s_forcedunwind)
static _Unwind_Word (*libgcc_s_getcfa) (struct _Unwind_Context *);
void
+__attribute_noinline__
pthread_cancel_init (void)
{
- void *resume, *personality, *forcedunwind, *getcfa;
+ void *resume;
+ void *personality;
+ void *forcedunwind;
+ void *getcfa;
void *handle;
- if (__builtin_expect (libgcc_s_getcfa != NULL, 1))
- return;
+ if (__builtin_expect (libgcc_s_handle != NULL, 1))
+ {
+ /* Force gcc to reload all values. */
+ __asm__ volatile ("" ::: "memory");
+ return;
+ }
- handle = __libc_dlopen ("libgcc_s.so.1");
+ handle = __libc_dlopen (LIBGCC_S_SO);
if (handle == NULL
|| (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
@@ -55,22 +63,46 @@ pthread_cancel_init (void)
#endif
)
{
- printf("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
+ printf (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");
abort();
}
+ PTR_MANGLE (resume);
libgcc_s_resume = resume;
+ PTR_MANGLE (personality);
libgcc_s_personality = personality;
+ PTR_MANGLE (forcedunwind);
libgcc_s_forcedunwind = forcedunwind;
+ PTR_MANGLE (getcfa);
libgcc_s_getcfa = getcfa;
+ /* Make sure libgcc_s_handle is written last. Otherwise,
+ pthread_cancel_init might return early even when the pointer the
+ caller is interested in is not initialized yet. */
+ atomic_write_barrier ();
+ libgcc_s_handle = handle;
+}
+
+void
+__libc_freeres_fn_section
+__unwind_freeres (void)
+{
+ void *handle = libgcc_s_handle;
+ if (handle != NULL)
+ {
+ libgcc_s_handle = NULL;
+ __libc_dlclose (handle);
+ }
}
void
_Unwind_Resume (struct _Unwind_Exception *exc)
{
- if (__builtin_expect (libgcc_s_resume == NULL, 0))
+ if (__builtin_expect (libgcc_s_handle == NULL, 0))
pthread_cancel_init ();
- libgcc_s_resume (exc);
+
+ void (*resume) (struct _Unwind_Exception *exc) = libgcc_s_resume;
+ PTR_DEMANGLE (resume);
+ resume (exc);
}
_Unwind_Reason_Code
@@ -79,25 +111,37 @@ __gcc_personality_v0 (int version, _Unwind_Action actions,
struct _Unwind_Exception *ue_header,
struct _Unwind_Context *context)
{
- if (__builtin_expect (libgcc_s_personality == NULL, 0))
+ if (__builtin_expect (libgcc_s_handle == NULL, 0))
pthread_cancel_init ();
- return libgcc_s_personality (version, actions, exception_class,
- ue_header, context);
+
+ _Unwind_Reason_Code (*personality)
+ (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *) = libgcc_s_personality;
+ PTR_DEMANGLE (personality);
+ return personality (version, actions, exception_class, ue_header, context);
}
_Unwind_Reason_Code
_Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
void *stop_argument)
{
- if (__builtin_expect (libgcc_s_forcedunwind == NULL, 0))
+ if (__builtin_expect (libgcc_s_handle == NULL, 0))
pthread_cancel_init ();
- return libgcc_s_forcedunwind (exc, stop, stop_argument);
+
+ _Unwind_Reason_Code (*forcedunwind)
+ (struct _Unwind_Exception *, _Unwind_Stop_Fn, void *)
+ = libgcc_s_forcedunwind;
+ PTR_DEMANGLE (forcedunwind);
+ return forcedunwind (exc, stop, stop_argument);
}
_Unwind_Word
_Unwind_GetCFA (struct _Unwind_Context *context)
{
- if (__builtin_expect (libgcc_s_getcfa == NULL, 0))
+ if (__builtin_expect (libgcc_s_handle == NULL, 0))
pthread_cancel_init ();
- return libgcc_s_getcfa (context);
+
+ _Unwind_Word (*getcfa) (struct _Unwind_Context *) = libgcc_s_getcfa;
+ PTR_DEMANGLE (getcfa);
+ return getcfa (context);
}
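
The hunk above stores libgcc_s_handle only after every unwind callback has been looked up and mangled, with atomic_write_barrier () ordering the stores, so a caller that sees a non-NULL handle on the fast path also sees fully initialized pointers. Below is a minimal standalone sketch of that publish-last lazy-initialization pattern, using C11 atomics in place of uClibc's internal barrier and PTR_MANGLE macros; all names (lazy_init, resolve_symbols, unwind_table) are illustrative, not taken from the patch.

/* Sketch of the "publish the handle last" pattern, with C11 atomics
   standing in for atomic_write_barrier ().  Illustrative names only.  */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct unwind_table
{
  void (*resume) (void *exc);
  void (*getcfa) (void *ctx);
};

static struct unwind_table table;            /* the payload */
static _Atomic (void *) handle;              /* published last */

static void dummy_resume (void *exc) { (void) exc; }
static void dummy_getcfa (void *ctx) { (void) ctx; }

/* Stand-in for the __libc_dlsym + PTR_MANGLE sequence.  */
static void resolve_symbols (struct unwind_table *t, void *h)
{
  (void) h;
  t->resume = dummy_resume;
  t->getcfa = dummy_getcfa;
}

static void lazy_init (void *dl_handle)
{
  /* Fast path: once the handle is visible, the payload stores made
     before the release store below are visible as well.  */
  if (atomic_load_explicit (&handle, memory_order_acquire) != NULL)
    return;

  resolve_symbols (&table, dl_handle);

  /* Plays the role of atomic_write_barrier (): no earlier store may be
     reordered past this publication of the handle.  */
  atomic_store_explicit (&handle, dl_handle, memory_order_release);
}

int main (void)
{
  static int fake_handle;              /* stands in for the dlopen result */
  lazy_init (&fake_handle);
  lazy_init (&fake_handle);            /* second call takes the fast path */
  printf ("resume callback resolved: %s\n", table.resume ? "yes" : "no");
  return 0;
}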
diff --git a/libpthread/nptl/sysdeps/pthread/unwind-resume.c b/libpthread/nptl/sysdeps/pthread/unwind-resume.c
index 018d2fd2f..3ca2fd8fc 100644
--- a/libpthread/nptl/sysdeps/pthread/unwind-resume.c
+++ b/libpthread/nptl/sysdeps/pthread/unwind-resume.c
@@ -21,6 +21,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <unwind.h>
+#include <libgcc_s.h>
static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
static _Unwind_Reason_Code (*libgcc_s_personality)
@@ -33,17 +34,16 @@ void abort(void);
static void
init (void)
{
- void *resume = NULL;
- void *personality = NULL;
+ void *resume, *personality;
void *handle;
- resume = personality = NULL; /* make gcc silent */
- handle = dlopen ("libgcc_s.so.1", (RTLD_LOCAL | RTLD_LAZY));
+ resume = personality = NULL;
+ handle = dlopen (LIBGCC_S_SO, (RTLD_LOCAL | RTLD_LAZY));
if (handle == NULL
|| (resume = dlsym (handle, "_Unwind_Resume")) == NULL
|| (personality = dlsym (handle, "__gcc_personality_v0")) == NULL)
{
- printf("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
+ printf (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");
abort();
}
diff --git a/libpthread/nptl/sysdeps/sh/tcb-offsets.sym b/libpthread/nptl/sysdeps/sh/tcb-offsets.sym
index 539789a81..753b72b2d 100644
--- a/libpthread/nptl/sysdeps/sh/tcb-offsets.sym
+++ b/libpthread/nptl/sysdeps/sh/tcb-offsets.sym
@@ -9,3 +9,7 @@ CLEANUP_JMP_BUF offsetof (struct pthread, cleanup_jmp_buf)
MULTIPLE_THREADS_OFFSET offsetof (struct pthread, header.multiple_threads)
TLS_PRE_TCB_SIZE sizeof (struct pthread)
MUTEX_FUTEX offsetof (pthread_mutex_t, __data.__lock)
+POINTER_GUARD offsetof (tcbhead_t, pointer_guard)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX offsetof (struct pthread, header.private_futex)
+#endif
diff --git a/libpthread/nptl/sysdeps/sh/tls.h b/libpthread/nptl/sysdeps/sh/tls.h
index 5e5ce12b0..2c538eded 100644
--- a/libpthread/nptl/sysdeps/sh/tls.h
+++ b/libpthread/nptl/sysdeps/sh/tls.h
@@ -1,5 +1,5 @@
/* Definition for thread-local data handling. NPTL/SH version.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,6 +24,10 @@
# include <stdbool.h>
# include <stddef.h>
# include <stdint.h>
+# include <stdlib.h>
+# include <list.h>
+# include <sysdep.h>
+# include <bits/kernel-features.h>
/* Type for the dtv. */
typedef union dtv
@@ -39,7 +43,7 @@ typedef union dtv
typedef struct
{
dtv_t *dtv;
- void *private;
+ uintptr_t pointer_guard;
} tcbhead_t;
# define TLS_MULTIPLE_THREADS_IN_TCB 1
@@ -52,9 +56,9 @@ typedef struct
/* We require TLS support in the tools. */
#define HAVE_TLS_SUPPORT
#define HAVE___THREAD 1
-#define HAVE_TLS_MODEL_ATTRIBUTE 1
+#define HAVE_TLS_MODEL_ATTRIBUTE 1
/* Signal that TLS support is available. */
-# define USE_TLS 1
+# define USE_TLS 1
#ifndef __ASSEMBLER__
@@ -115,9 +119,9 @@ typedef struct
struct pthread *self = thread_self();
do not get optimized away. */
# define THREAD_SELF \
- ({ struct pthread *__thread_self; \
- __asm ("stc gbr,%0" : "=r" (__thread_self)); \
- __thread_self - 1;})
+ ({ struct pthread *__self; \
+ __asm ("stc gbr,%0" : "=r" (__self)); \
+ __self - 1;})
/* Magic for libthread_db to know how to do THREAD_SELF. */
# define DB_THREAD_SELF \
@@ -137,6 +141,42 @@ typedef struct
# define THREAD_SETMEM_NC(descr, member, idx, value) \
descr->member[idx] = (value)
+#define THREAD_GET_POINTER_GUARD() \
+ ({ tcbhead_t *__tcbp; \
+ __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __tcbp->pointer_guard;})
+ #define THREAD_SET_POINTER_GUARD(value) \
+ ({ tcbhead_t *__tcbp; \
+ __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ __tcbp->pointer_guard = (value);})
+#define THREAD_COPY_POINTER_GUARD(descr) \
+ ({ tcbhead_t *__tcbp; \
+ __asm __volatile ("stc gbr,%0" : "=r" (__tcbp)); \
+ ((tcbhead_t *) (descr + 1))->pointer_guard = __tcbp->pointer_guard;})
+
+/* Get and set the global scope generation counter in struct pthread. */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED 1
+#define THREAD_GSCOPE_FLAG_WAIT 2
+#define THREAD_GSCOPE_RESET_FLAG() \
+ do \
+ { int __res \
+ = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ THREAD_GSCOPE_FLAG_UNUSED); \
+ if (__res == THREAD_GSCOPE_FLAG_WAIT) \
+ lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+ do \
+ { \
+ THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
+ atomic_write_barrier (); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_WAIT() \
+ GL(dl_wait_lookup_done) ()
+
#endif /* __ASSEMBLER__ */
#endif /* tls.h */
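
The THREAD_GSCOPE_* macros added above implement a three-state hand-off: a thread sets its flag to USED around a global-scope lookup, the waiting side flips USED to WAIT and sleeps on the flag, and the thread's reset wakes it only if it actually saw WAIT. The sketch below is a self-contained illustration of that protocol with GCC __atomic builtins and the raw futex syscall; the single global flag and the helper names are assumptions made for the example (the real flag lives in each thread's struct pthread, and the waiting side, GL(dl_wait_lookup_done), is not part of this patch).

/* Three-state gscope hand-off: UNUSED/USED/WAIT.  Linux-only sketch,
   illustrative names; build with -pthread.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

enum { GSCOPE_UNUSED = 0, GSCOPE_USED = 1, GSCOPE_WAIT = 2 };

static int gscope_flag;

static long futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* THREAD_GSCOPE_SET_FLAG: plain store plus write barrier, modelled
   here as a release store.  */
static void gscope_set (void)
{
  __atomic_store_n (&gscope_flag, GSCOPE_USED, __ATOMIC_RELEASE);
}

/* THREAD_GSCOPE_RESET_FLAG: drop the flag, wake only if a waiter
   announced itself by storing WAIT.  */
static void gscope_reset (void)
{
  if (__atomic_exchange_n (&gscope_flag, GSCOPE_UNUSED, __ATOMIC_RELEASE)
      == GSCOPE_WAIT)
    futex (&gscope_flag, FUTEX_WAKE_PRIVATE, 1);
}

/* Roughly what the THREAD_GSCOPE_WAIT side does for one thread.  */
static void gscope_wait (void)
{
  int expected = GSCOPE_USED;
  if (__atomic_compare_exchange_n (&gscope_flag, &expected, GSCOPE_WAIT, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    while (__atomic_load_n (&gscope_flag, __ATOMIC_ACQUIRE) == GSCOPE_WAIT)
      futex (&gscope_flag, FUTEX_WAIT_PRIVATE, GSCOPE_WAIT);
}

static void *lookup_thread (void *arg)
{
  (void) arg;
  gscope_set ();
  usleep (1000);                 /* pretend to walk the global scope */
  gscope_reset ();
  return NULL;
}

int main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, lookup_thread, NULL);
  usleep (100);                  /* may or may not catch the flag set */
  gscope_wait ();                /* returns once no lookup is in flight */
  pthread_join (t, NULL);
  puts ("gscope drained");
  return 0;
}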
diff --git a/libpthread/nptl/sysdeps/sparc/tcb-offsets.sym b/libpthread/nptl/sysdeps/sparc/tcb-offsets.sym
index 237f975b2..923af8a5b 100644
--- a/libpthread/nptl/sysdeps/sparc/tcb-offsets.sym
+++ b/libpthread/nptl/sysdeps/sparc/tcb-offsets.sym
@@ -2,5 +2,6 @@
#include <tls.h>
MULTIPLE_THREADS_OFFSET offsetof (tcbhead_t, multiple_threads)
+POINTER_GUARD offsetof (tcbhead_t, pointer_guard)
PID offsetof (struct pthread, pid)
TID offsetof (struct pthread, tid)
diff --git a/libpthread/nptl/sysdeps/sparc/tls.h b/libpthread/nptl/sysdeps/sparc/tls.h
index e5d27fb57..e93542c9f 100644
--- a/libpthread/nptl/sysdeps/sparc/tls.h
+++ b/libpthread/nptl/sysdeps/sparc/tls.h
@@ -1,5 +1,5 @@
/* Definitions for thread-local data handling. NPTL/sparc version.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,7 @@
# include <stdint.h>
# include <stdlib.h>
# include <list.h>
+# include <bits/kernel-features.h>
/* Type for the dtv. */
typedef union dtv
@@ -45,8 +46,18 @@ typedef struct
dtv_t *dtv;
void *self;
int multiple_threads;
+#if __WORDSIZE == 64
+ int gscope_flag;
+#endif
uintptr_t sysinfo;
uintptr_t stack_guard;
+ uintptr_t pointer_guard;
+#if __WORDSIZE != 64
+ int gscope_flag;
+#endif
+#ifndef __ASSUME_PRIVATE_FUTEX
+ int private_futex;
+#endif
} tcbhead_t;
#else /* __ASSEMBLER__ */
@@ -59,12 +70,15 @@ typedef struct
#define HAVE_TLS_MODEL_ATTRIBUTE 1
/* Signal that TLS support is available. */
-#define USE_TLS 1
+#define USE_TLS 1
#ifndef __ASSEMBLER__
/* Get system call information. */
# include <sysdep.h>
+/* Get the thread descriptor definition. */
+# include <descr.h>
+
register struct pthread *__thread_self __asm__("%g7");
/* This is the size of the initial TCB. Can't be just sizeof (tcbhead_t),
@@ -81,9 +95,6 @@ register struct pthread *__thread_self __asm__("%g7");
/* Alignment requirements for the TCB. */
# define TLS_TCB_ALIGN __alignof__ (struct pthread)
-/* Get the thread descriptor definition. */
-# include <descr.h>
-
/* The TCB can have any size and the memory following the address the
thread pointer points to is unspecified. Allocate the TCB there. */
# define TLS_TCB_AT_TP 1
@@ -134,6 +145,37 @@ register struct pthread *__thread_self __asm__("%g7");
((descr)->header.stack_guard \
= THREAD_GETMEM (THREAD_SELF, header.stack_guard))
+/* Get/set the stack guard field in TCB head. */
+#define THREAD_GET_POINTER_GUARD() \
+ THREAD_GETMEM (THREAD_SELF, header.pointer_guard)
+#define THREAD_SET_POINTER_GUARD(value) \
+ THREAD_SETMEM (THREAD_SELF, header.pointer_guard, value)
+# define THREAD_COPY_POINTER_GUARD(descr) \
+ ((descr)->header.pointer_guard = THREAD_GET_POINTER_GUARD ())
+
+/* Get and set the global scope generation counter in struct pthread. */
+#define THREAD_GSCOPE_FLAG_UNUSED 0
+#define THREAD_GSCOPE_FLAG_USED 1
+#define THREAD_GSCOPE_FLAG_WAIT 2
+#define THREAD_GSCOPE_RESET_FLAG() \
+ do \
+ { int __res \
+ = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ THREAD_GSCOPE_FLAG_UNUSED); \
+ if (__res == THREAD_GSCOPE_FLAG_WAIT) \
+ lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_SET_FLAG() \
+ do \
+ { \
+ THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \
+ atomic_write_barrier (); \
+ } \
+ while (0)
+#define THREAD_GSCOPE_WAIT() \
+ GL(dl_wait_lookup_done) ()
+
#endif /* !ASSEMBLER */
#endif /* tls.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in b/libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in
index 6c491b6e5..fa73a8a78 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/Makefile.in
@@ -10,6 +10,7 @@ libpthread_CSRC = pthread_attr_getaffinity.c \
pthread_getcpuclockid.c pthread_kill.c \
pthread_mutex_cond_lock.c pthread_setaffinity.c \
pthread_yield.c sem_post.c sem_timedwait.c \
+ pthread_sigqueue.c \
sem_trywait.c sem_wait.c pt-fork.c \
sigtimedwait.c sigwaitinfo.c sigwait.c pt-sleep.c
@@ -148,7 +149,10 @@ headers_clean-y += nptl_linux_headers_clean
CFLAGS-lowlevelbarrier.c = -S
CFLAGS-lowlevelcond.c = -S
CFLAGS-lowlevelrwlock.c = -S
+CFLAGS-lowlevelrobustlock.c = -S
CFLAGS-unwindbuf.c = -S
+CFLAGS-structsem.c = -S
+CFLAGS-pthread-pi-defines.c = -S
$(PTHREAD_LINUX_OUT)/lowlevelbarrier.c: $(PTHREAD_LINUX_DIR)/lowlevelbarrier.sym
$(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
@@ -159,9 +163,19 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.c: $(PTHREAD_LINUX_DIR)/lowlevelcond.sym
$(PTHREAD_LINUX_OUT)/lowlevelrwlock.c: $(PTHREAD_LINUX_DIR)/lowlevelrwlock.sym
$(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.c: $(PTHREAD_LINUX_DIR)/lowlevelrobustlock.sym
+ $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
$(PTHREAD_LINUX_OUT)/unwindbuf.c: $(PTHREAD_LINUX_DIR)/unwindbuf.sym
$(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+$(PTHREAD_LINUX_OUT)/structsem.c: $(PTHREAD_LINUX_DIR)/structsem.sym
+ $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.c: $(PTHREAD_LINUX_DIR)/pthread-pi-defines.sym
+ $(do_awk) $(top_srcdir)extra/scripts/gen-as-const.awk $< > $@
+
+
$(PTHREAD_LINUX_OUT)/lowlevelbarrier.s: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.c
$(compile.c)
@@ -171,9 +185,19 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.s: $(PTHREAD_LINUX_OUT)/lowlevelcond.c
$(PTHREAD_LINUX_OUT)/lowlevelrwlock.s: $(PTHREAD_LINUX_OUT)/lowlevelrwlock.c
$(compile.c)
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.s: $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.c
+ $(compile.c)
+
$(PTHREAD_LINUX_OUT)/unwindbuf.s: $(PTHREAD_LINUX_OUT)/unwindbuf.c
$(compile.c)
+$(PTHREAD_LINUX_OUT)/structsem.s: $(PTHREAD_LINUX_OUT)/structsem.c
+ $(compile.c)
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.s: $(PTHREAD_LINUX_OUT)/pthread-pi-defines.c
+ $(compile.c)
+
+
$(PTHREAD_LINUX_OUT)/lowlevelbarrier.h: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.s
$(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
@@ -183,13 +207,25 @@ $(PTHREAD_LINUX_OUT)/lowlevelcond.h: $(PTHREAD_LINUX_OUT)/lowlevelcond.s
$(PTHREAD_LINUX_OUT)/lowlevelrwlock.h: $(PTHREAD_LINUX_OUT)/lowlevelrwlock.s
$(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+$(PTHREAD_LINUX_OUT)/lowlevelrobustlock.h: $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.s
+ $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
$(PTHREAD_LINUX_OUT)/unwindbuf.h: $(PTHREAD_LINUX_OUT)/unwindbuf.s
$(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+$(PTHREAD_LINUX_OUT)/structsem.h: $(PTHREAD_LINUX_OUT)/structsem.s
+ $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
+$(PTHREAD_LINUX_OUT)/pthread-pi-defines.h: $(PTHREAD_LINUX_OUT)/pthread-pi-defines.s
+ $(do_sed) -n "s/^.*@@@name@@@\([^@]*\)@@@value@@@[^0-9Xxa-fA-F-]*\([0-9Xxa-fA-F-][0-9Xxa-fA-F-]*\).*@@@end@@@.*$\/#define \1 \2/p" $< > $@
+
nptl_linux_headers: $(PTHREAD_LINUX_OUT)/lowlevelbarrier.h \
$(PTHREAD_LINUX_OUT)/lowlevelcond.h \
$(PTHREAD_LINUX_OUT)/lowlevelrwlock.h \
- $(PTHREAD_LINUX_OUT)/unwindbuf.h
+ $(PTHREAD_LINUX_OUT)/lowlevelrobustlock.h \
+ $(PTHREAD_LINUX_OUT)/unwindbuf.h \
+ $(PTHREAD_LINUX_OUT)/structsem.h \
+ $(PTHREAD_LINUX_OUT)/pthread-pi-defines.h
HEADERS_BITS_PTHREAD := $(notdir $(wildcard $(PTHREAD_LINUX_DIR)/bits/*.h))
ALL_HEADERS_BITS_PTHREAD := $(addprefix include/bits/,$(HEADERS_BITS_PTHREAD))
@@ -201,7 +237,10 @@ nptl_linux_headers_clean:
$(do_rm) $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelbarrier., c h s) \
$(addprefix $(PTHREAD_LINUX_OUT)/lowlevelcond., c h s) \
$(addprefix $(PTHREAD_LINUX_OUT)/lowlevelrwlock., c h s) \
- $(addprefix $(PTHREAD_LINUX_OUT)/unwindbuf., c h s)
+ $(addprefix $(PTHREAD_LINUX_OUT)/lowlevelrobustlock., c h s) \
+ $(addprefix $(PTHREAD_LINUX_OUT)/unwindbuf., c h s) \
+ $(addprefix $(PTHREAD_LINUX_OUT)/structsem., c h s) \
+ $(addprefix $(PTHREAD_LINUX_OUT)/pthread-pi-defines., c h s)
nptl_linux_clean:
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions
deleted file mode 100644
index 437c4da28..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/Versions
+++ /dev/null
@@ -1,13 +0,0 @@
-libpthread {
- GLIBC_2.3.3 {
- # Changed PTHREAD_STACK_MIN.
- pthread_attr_setstack; pthread_attr_setstacksize;
- }
-}
-librt {
- GLIBC_2.3.3 {
- # Changed timer_t.
- timer_create; timer_delete; timer_getoverrun; timer_gettime;
- timer_settime;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h
index e0718780c..a7c9740a0 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/local_lim.h
@@ -1,5 +1,5 @@
/* Minimum guaranteed maximum values for system limits. Linux/Alpha version.
- Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+ Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
#ifndef OPEN_MAX
# define __undef_OPEN_MAX
#endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
/* The kernel sources contain a file with all the needed information. */
#include <linux/limits.h>
@@ -50,6 +53,11 @@
# undef OPEN_MAX
# undef __undef_OPEN_MAX
#endif
+/* Have to remove ARG_MAX? */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
/* The number of data keys per process. */
#define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
/* Maximum message queue priority level. */
#define MQ_PRIO_MAX 32768
+
+/* Maximum value the semaphore can have. */
+#define SEM_VALUE_MAX (2147483647)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
index 0f3bf838e..41c0be197 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
@@ -1,5 +1,5 @@
/* Machine-specific pthread type layouts. Alpha version.
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -65,7 +65,7 @@ typedef union
int __kind;
int __spins;
__pthread_list_t __list;
-#define __PTHREAD_MUTEX_HAVE_PREV 1
+#define __PTHREAD_MUTEX_HAVE_PREV 1
} __data;
char __size[__SIZEOF_PTHREAD_MUTEX_T];
long int __align;
@@ -126,9 +126,9 @@ typedef union
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
int __writer;
- int __pad1;
+ int __shared;
+ unsigned long int __pad1;
unsigned long int __pad2;
- unsigned long int __pad3;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h
index 6dadfda20..be4469c69 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/bits/semaphore.h
@@ -26,9 +26,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
index fd4a7ca4b..b7f4de338 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,161 +24,250 @@
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
+#include <bits/kernel-features.h>
-#define __NR_futex 394
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
-#define lll_futex_wait(futexp, val) \
+#define FUTEX_WAKE_OP 5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
+
+
+#define lll_futex_wait(futexp, val, private) \
+ lll_futex_timed_wait (futexp, val, NULL, private)
+
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), 0); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAIT, private), \
+ (val), (timespec)); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), (timespec)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAKE, private), \
+ (nr), 0); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
})
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1, private); \
+ } \
+ while (0)
+
+/* Returns non-zero if error happened, zero if success. */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAKE, (nr), 0); \
- INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret; \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+ (nr_wake), (nr_move), (mutex), (val)); \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 6, \
- (futexp), FUTEX_CMP_REQUEUE, (nr_wake), \
- (nr_move), (mutex), (val)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
+ (nr_wake), (nr_wake2), (futexp2), \
+ FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
+
+
static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
-#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock))
+#define lll_trylock(lock) __lll_trylock (&(lock))
static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
-#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-extern void __lll_lock_wait (int *futex) attribute_hidden;
+static inline int __attribute__((always_inline))
+__lll_robust_trylock(int *futex, int id)
+{
+ return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
+}
+#define lll_robust_trylock(lock, id) \
+ __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+__lll_lock(int *futex, int private)
{
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- __lll_lock_wait (futex);
+ {
+ if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+ __lll_lock_wait_private (futex);
+ else
+ __lll_lock_wait (futex, private);
+ }
+}
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+static inline int __attribute__ ((always_inline))
+__lll_robust_lock (int *futex, int id, int private)
+{
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_lock_wait (futex, private);
+ return result;
}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
- __lll_lock_wait (futex);
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- result = __lll_timedlock_wait (futex, abstime);
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
+static inline int __attribute__ ((always_inline))
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+ int id, int private)
{
- int val = atomic_exchange_rel (futex, 0);
- if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1);
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime, private);
+ return result;
}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
- (void) atomic_exchange_rel (futex, 0);
- lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
+#define __lll_unlock(futex, private) \
+ (void) \
+ ({ int *__futex = (futex); \
+ int __oldval = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__oldval > 1, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ })
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
-#define lll_mutex_islocked(futex) \
- (futex != 0)
+#define __lll_robust_unlock(futex, private) \
+ (void) \
+ ({ int *__futex = (futex); \
+ int __oldval = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__oldval & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ })
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
-/* Our internal lock implementation is identical to the binary-compatible
- mutex implementation. */
-/* Type for lock object. */
-typedef int lll_lock_t;
+#define lll_islocked(futex) \
+ (futex != 0)
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- >1 - taken by more users */
-
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
afterwards. */
#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid); \
+ do { \
+ __typeof (tid) __tid; \
+ while ((__tid = (tid)) != 0) \
+ lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -192,26 +281,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
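
The __lll_private_flag logic above is why the comment says the LLL_PRIVATE/LLL_SHARED definitions only look backwards: LLL_SHARED is defined as FUTEX_PRIVATE_FLAG precisely so that XOR-ing it into (fl | FUTEX_PRIVATE_FLAG) strips the flag again for process-shared locks, while LLL_PRIVATE (0) leaves it set. The tiny program below checks that arithmetic for the __ASSUME_PRIVATE_FUTEX variant used outside libc.so; the constants are copied from the header, the program itself is only an illustration.

/* Check of the __lll_private_flag arithmetic (__ASSUME_PRIVATE_FUTEX,
   outside libc.so/ld.so).  Illustration only.  */
#include <stdio.h>
#include <assert.h>

#define FUTEX_WAIT          0
#define FUTEX_WAKE          1
#define FUTEX_PRIVATE_FLAG  128

#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

#define lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

int main (void)
{
  /* LLL_PRIVATE (0) leaves the flag set: FUTEX_WAIT | FUTEX_PRIVATE_FLAG.  */
  assert (lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
          == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));

  /* LLL_SHARED (== FUTEX_PRIVATE_FLAG) XORs the flag back out, leaving
     the plain process-shared FUTEX_WAKE.  */
  assert (lll_private_flag (FUTEX_WAKE, LLL_SHARED) == FUTEX_WAKE);

  printf ("private wait op = %d, shared wake op = %d\n",
          lll_private_flag (FUTEX_WAIT, LLL_PRIVATE),
          lll_private_flag (FUTEX_WAKE, LLL_SHARED));
  return 0;
}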
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
index 79a3c47ae..0e7e9790d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@ clear_once_control (void *arg)
pthread_once_t *once_control = (pthread_once_t *) arg;
*once_control = 0;
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
int
@@ -72,7 +72,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
break;
/* Same generation, some other thread was faster. Wait. */
- lll_futex_wait (once_control, oldval);
+ lll_futex_wait (once_control, oldval, LLL_PRIVATE);
}
/* This thread is the first here. Do the initialization.
@@ -88,7 +88,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
atomic_increment (once_control);
/* Wake up all other threads. */
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
return 0;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h
index 49224079e..7049b3607 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -55,6 +55,7 @@ __LABEL(name) \
bne a3, SYSCALL_ERROR_LABEL; \
__LABEL($pseudo_ret) \
.subsection 2; \
+ cfi_startproc; \
__LABEL($pseudo_cancel) \
subq sp, 64, sp; \
cfi_def_cfa_offset(64); \
@@ -90,12 +91,13 @@ __LABEL($multi_error) \
cfi_def_cfa_offset(0); \
__LABEL($syscall_error) \
SYSCALL_ERROR_HANDLER; \
+ cfi_endproc; \
.previous
# undef PSEUDO_END
# define PSEUDO_END(sym) \
- .subsection 2; \
cfi_endproc; \
+ .subsection 2; \
.size sym, .-sym
# define SAVE_ARGS_0 /* Nothing. */
@@ -142,7 +144,7 @@ __LABEL($syscall_error) \
extern int __local_multiple_threads attribute_hidden;
# define SINGLE_THREAD_P \
__builtin_expect (__local_multiple_threads == 0, 1)
-# elif defined(PIC)
+# elif defined(__PIC__)
# define SINGLE_THREAD_P(reg) ldl reg, __local_multiple_threads(gp) !gprel
# else
# define SINGLE_THREAD_P(reg) \
@@ -167,3 +169,9 @@ extern int __local_multiple_threads attribute_hidden;
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S
index f0c39982c..f4ed9311b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/vfork.S
@@ -42,5 +42,5 @@ PSEUDO (__vfork, vfork, 0)
1: ret
PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
index 49a935a52..b0586ea1e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
@@ -37,61 +37,21 @@ typedef uintmax_t uatomic_max_t;
void __arm_link_error (void);
-#ifdef __thumb__
-
-/* Note that to allow efficient implementation the arguemtns are reversed
- relative to atomic_exchange_acq. */
-int __thumb_swpb (int newvalue, void *mem)
- attribute_hidden;
-unsigned int __thumb_swp (unsigned int newvalue, void *mem)
- attribute_hidden;
-unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *mem)
- attribute_hidden;
-
-#define atomic_exchange_acq(mem, newvalue) \
- ({ __typeof (*mem) result; \
- if (sizeof (*mem) == 1) \
- result = __thumb_swpb (newvalue, mem); \
- else if (sizeof (*mem) == 4) \
- result = __thumb_swp (newvalue, mem); \
- else \
- { \
- result = 0; \
- abort (); \
- } \
- result; })
-
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ((__typeof (oldval)) __thumb_cmpxchg (oldval, newval, mem))
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-
+#ifdef __thumb2__
+#define atomic_full_barrier() \
+ __asm__ __volatile__ \
+ ("movw\tip, #0x0fa0\n\t" \
+ "movt\tip, #0xffff\n\t" \
+ "blx\tip" \
+ : : : "ip", "lr", "cc", "memory");
#else
-/* ARM mode. */
-
-#define atomic_exchange_acq(mem, newvalue) \
- ({ __typeof (*mem) _xchg_result; \
- if (sizeof (*mem) == 1) \
- __asm__ __volatile__ ("swpb %0, %1, [%2]" \
- : "=&r,&r" (_xchg_result) \
- : "r,0" (newvalue), "r,r" (mem) : "memory"); \
- else if (sizeof (*mem) == 4) \
- __asm__ __volatile__ ("swp %0, %1, [%2]" \
- : "=&r,&r" (_xchg_result) \
- : "r,0" (newvalue), "r,r" (mem) : "memory"); \
- else \
- { \
- _xchg_result = 0; \
- abort (); \
- } \
- _xchg_result; })
+#define atomic_full_barrier() \
+ __asm__ __volatile__ \
+ ("mov\tip, #0xffff0fff\n\t" \
+ "mov\tlr, pc\n\t" \
+ "add\tpc, ip, #(0xffff0fa0 - 0xffff0fff)" \
+ : : : "ip", "lr", "cc", "memory");
+#endif
/* Atomic compare and exchange. This sequence relies on the kernel to
provide a compare and exchange operation which is atomic on the
@@ -108,6 +68,9 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me
specify one to work around GCC PR rtl-optimization/21223. Otherwise
it may cause a_oldval or a_tmp to be moved to a different register. */
+#ifdef __thumb2__
+/* Thumb-2 has ldrex/strex. However it does not have barrier instructions,
+ so we still need to use the kernel helper. */
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
({ register __typeof (oldval) a_oldval asm ("r0"); \
register __typeof (oldval) a_newval asm ("r1") = (newval); \
@@ -115,22 +78,45 @@ unsigned int __thumb_cmpxchg (unsigned int oldval, unsigned int newval, void *me
register __typeof (oldval) a_tmp asm ("r3"); \
register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
__asm__ __volatile__ \
- ("0:\tldr\t%1,[%3]\n\t" \
- "cmp\t%1, %4\n\t" \
+ ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
+ "cmp\t%[tmp], %[old2]\n\t" \
"bne\t1f\n\t" \
- "mov\t%0, %4\n\t" \
- "mov\t%1, #0xffff0fff\n\t" \
+ "mov\t%[old], %[old2]\n\t" \
+ "movw\t%[tmp], #0x0fc0\n\t" \
+ "movt\t%[tmp], #0xffff\n\t" \
+ "blx\t%[tmp]\n\t" \
+ "bcc\t0b\n\t" \
+ "mov\t%[tmp], %[old2]\n\t" \
+ "1:" \
+ : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \
+ : [new] "r" (a_newval), [ptr] "r" (a_ptr), \
+ [old2] "r" (a_oldval2) \
+ : "ip", "lr", "cc", "memory"); \
+ a_tmp; })
+#else
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+ ({ register __typeof (oldval) a_oldval asm ("r0"); \
+ register __typeof (oldval) a_newval asm ("r1") = (newval); \
+ register __typeof (mem) a_ptr asm ("r2") = (mem); \
+ register __typeof (oldval) a_tmp asm ("r3"); \
+ register __typeof (oldval) a_oldval2 asm ("r4") = (oldval); \
+ __asm__ __volatile__ \
+ ("0:\tldr\t%[tmp],[%[ptr]]\n\t" \
+ "cmp\t%[tmp], %[old2]\n\t" \
+ "bne\t1f\n\t" \
+ "mov\t%[old], %[old2]\n\t" \
+ "mov\t%[tmp], #0xffff0fff\n\t" \
"mov\tlr, pc\n\t" \
- "add\tpc, %1, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ "add\tpc, %[tmp], #(0xffff0fc0 - 0xffff0fff)\n\t" \
"bcc\t0b\n\t" \
- "mov\t%1, %4\n\t" \
+ "mov\t%[tmp], %[old2]\n\t" \
"1:" \
- : "=&r" (a_oldval), "=&r" (a_tmp) \
- : "r" (a_newval), "r" (a_ptr), "r" (a_oldval2) \
+ : [old] "=&r" (a_oldval), [tmp] "=&r" (a_tmp) \
+ : [new] "r" (a_newval), [ptr] "r" (a_ptr), \
+ [old2] "r" (a_oldval2) \
: "ip", "lr", "cc", "memory"); \
a_tmp; })
+#endif
#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
({ __arm_link_error (); oldval; })
-
-#endif /* __thumb__ */
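
Both new sequences above end up branching into the ARM Linux "kuser" helpers that the kernel maps at the top of every 32-bit process (see the kernel's Documentation/arm/kernel_user_helpers): the memory barrier at 0xffff0fa0 and the cmpxchg at 0xffff0fc0. The snippet below is a plain-C sketch of calling the cmpxchg helper directly; it is meaningful on 32-bit ARM Linux only and is an illustration, not how the header itself uses it.

/* Calling the kernel-provided cmpxchg helper at its fixed address.
   32-bit ARM Linux only; illustrative.  */
#include <stdio.h>

typedef int (kernel_cmpxchg_t) (int oldval, int newval, volatile int *ptr);

/* Fixed address provided by the kernel; returns 0 when *ptr was updated.  */
#define __kernel_cmpxchg (*(kernel_cmpxchg_t *) 0xffff0fc0)

int main (void)
{
  volatile int word = 0;

  if (__kernel_cmpxchg (0, 1, &word) == 0)
    printf ("swapped, word = %d\n", (int) word);      /* word is now 1 */

  if (__kernel_cmpxchg (0, 2, &word) != 0)
    printf ("not swapped, word = %d\n", (int) word);  /* still 1 */

  return 0;
}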
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h
index ea8d6a2f0..e1b115c8c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/pthreadtypes.h
@@ -19,6 +19,8 @@
#ifndef _BITS_PTHREADTYPES_H
#define _BITS_PTHREADTYPES_H 1
+#include <endian.h>
+
#define __SIZEOF_PTHREAD_ATTR_T 36
#define __SIZEOF_PTHREAD_MUTEX_T 24
#define __SIZEOF_PTHREAD_MUTEXATTR_T 4
@@ -126,9 +128,21 @@ typedef union
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __shared;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned char __flags;
+#else
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
+#endif
int __writer;
} __data;
char __size[__SIZEOF_PTHREAD_RWLOCK_T];
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h
index 3fc647d31..dadfac2af 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/semaphore.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -27,9 +27,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX ((int) ((~0u) >> 1))
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c
index 74be18855..60ccf7700 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.c
@@ -1,5 +1,5 @@
/* low level locking for pthread library. Generic futex-using version.
- Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -22,8 +22,36 @@
#include <lowlevellock.h>
#include <sys/time.h>
+void
+__lll_lock_wait_private (int *futex)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
+/* These functions don't get included in libc.so */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, private);
+ }
+ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+}
+
+
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
struct timespec rt;
@@ -55,23 +83,10 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
if (rt.tv_sec < 0)
return ETIMEDOUT;
- lll_futex_timed_wait (futex, 2, &rt);
+ // XYZ: Lost the lock to check whether it was private.
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
- while (atomic_exchange_acq (futex, 2) != 0);
-
- return 0;
-}
-
-
-/* These don't get included in libc.so */
-#ifdef IS_IN_libpthread
-int
-lll_unlock_wake_cb (int *futex)
-{
- int val = atomic_exchange_rel (futex, 0);
-
- if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1);
+ while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
return 0;
}
@@ -108,11 +123,11 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
return ETIMEDOUT;
/* Wait until thread terminates. */
- if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+ // XYZ: Lost the lock to check whether it was private.
+ if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
return ETIMEDOUT;
}
return 0;
}
-
#endif
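
__lll_lock_wait_private and __lll_lock_wait above implement the usual three-state futex lock: 0 is unlocked, 1 is locked without waiters, 2 is locked with possible waiters, which is why the unlock path only issues a FUTEX_WAKE when it reads back a value greater than 1. Below is a standalone Linux sketch of that protocol using GCC __atomic builtins and the raw futex syscall; the lock/unlock helper names are illustrative and this is not the library's code (which goes through the lll_* macros and INTERNAL_SYSCALL). Build with -pthread.

/* 0/1/2 futex lock protocol as used by __lll_lock_wait.
   Linux-only, illustrative names; compile with -pthread.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

static int futex_word;                  /* the lock word */
static long counter;                    /* data protected by the lock */

static long futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void lock (int *f)
{
  int expected = 0;
  /* Fast path: 0 -> 1, uncontended.  */
  if (__atomic_compare_exchange_n (f, &expected, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;
  do
    {
      /* Mark the lock contended (1 -> 2); `expected' is left holding
         the value that was actually seen.  */
      expected = 1;
      __atomic_compare_exchange_n (f, &expected, 2, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
      if (expected != 0)
        /* Sleep while the word is 2; the kernel rechecks the value.  */
        futex (f, FUTEX_WAIT_PRIVATE, 2);
      expected = 0;
    }
  /* Acquire as the contended owner (0 -> 2) so unlock wakes a waiter.  */
  while (!__atomic_compare_exchange_n (f, &expected, 2, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static void unlock (int *f)
{
  /* Release; wake one waiter only if someone marked the lock contended.  */
  if (__atomic_exchange_n (f, 0, __ATOMIC_RELEASE) > 1)
    futex (f, FUTEX_WAKE_PRIVATE, 1);
}

static void *worker (void *arg)
{
  (void) arg;
  for (int i = 0; i < 100000; i++)
    {
      lock (&futex_word);
      counter++;
      unlock (&futex_word);
    }
  return NULL;
}

int main (void)
{
  pthread_t t1, t2;
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  printf ("counter = %ld (expected 200000)\n", counter);
  return 0;
}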
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h
index 79f3ddeca..4c7d08c92 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Libr \ary; if not, write to the Free
+ License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
@@ -24,6 +24,7 @@
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
+#include <bits/kernel-features.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
@@ -31,267 +32,231 @@
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define lll_futex_wait(futexp, val) \
- ({ \
- INTERNAL_SYSCALL_DECL (__err); \
- long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), 0); \
- __ret; \
- })
+#define lll_futex_wait(futexp, val, private) \
+ lll_futex_timed_wait(futexp, val, NULL, private)
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), (timespec)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAIT, private), \
+ (val), (timespec)); \
__ret; \
})
-#define lll_futex_wake(futexp, nr) \
+#define lll_futex_wake(futexp, nr, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAKE, (nr), 0); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAKE, private), \
+ (nr), 0); \
__ret; \
})
-#define lll_robust_mutex_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
int *__futexp = &(futexv); \
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- lll_futex_wake (__futexp, 1); \
+ lll_futex_wake (__futexp, 1, private); \
} \
while (0)
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 6, \
- (futexp), FUTEX_CMP_REQUEUE, (nr_wake), \
- (nr_move), (mutex), (val)); \
- __ret; \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+ (nr_wake), (nr_move), (mutex), (val)); \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 6, \
- (futexp), FUTEX_WAKE_OP, (nr_wake), \
- (nr_wake2), (futexp2), \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
+ (nr_wake), (nr_wake2), (futexp2), \
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
- __ret; \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
-static inline int __attribute__((always_inline))
-__lll_mutex_trylock (int *futex)
-{
- int flag = 1, old;
-#ifdef __thumb__
- old = atomic_exchange_acq (futex, flag);
- if (old < 1)
- flag = 0;
- else if (old > 1)
- flag = atomic_exchange_acq (futex, old);
-#else
- __asm__ __volatile__ (
- "\tswp %[old], %[flag], [%[futex]] @ try to take the lock\n"
- "\tcmp %[old], #1 @ check old lock value\n"
- "\tmovlo %[flag], #0 @ if we got it, return 0\n"
- "\tswphi %[flag], %[old], [%[futex]] @ if it was contested,\n"
- " @ restore the contested flag,\n"
- " @ and check whether that won."
- : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old)
- : : "memory" );
-#endif
+#define lll_trylock(lock) \
+ atomic_compare_and_exchange_val_acq(&(lock), 1, 0)
+
+#define lll_cond_trylock(lock) \
+ atomic_compare_and_exchange_val_acq(&(lock), 2, 0)
+
+#define __lll_robust_trylock(futex, id) \
+ (atomic_compare_and_exchange_val_acq (futex, id, 0) != 0)
+#define lll_robust_trylock(lock, id) \
+ __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define __lll_lock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, \
+ 1, 0), 0)) \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
+ } \
+ }))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
+ })
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
+
+
+#define __lll_cond_lock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ if (__builtin_expect (atomic_exchange_acq (__futex, 2), 0)) \
+ __lll_lock_wait (__futex, private); \
+ }))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
- return flag;
-}
-#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock))
-
-
-static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock (int *futex)
-{
- int flag = 2, old;
-#ifdef __thumb__
- old = atomic_exchange_acq (futex, flag);
- if (old < 1)
- flag = 0;
- else if (old > 1)
- flag = atomic_exchange_acq (futex, old);
-#else
- __asm__ __volatile__ (
- "\tswp %[old], %[flag], [%[futex]] @ try to take the lock\n"
- "\tcmp %[old], #1 @ check old lock value\n"
- "\tmovlo %[flag], #0 @ if we got it, return 0\n"
- "\tswphi %[flag], %[old], [%[futex]] @ if it was contested,\n"
- " @ restore the contested flag,\n"
- " @ and check whether that won."
- : [futex] "+&r" (futex), [flag] "+&r" (flag), [old] "=&r" (old)
- : : "memory" );
-#endif
- return flag;
-}
-#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
-
-
-static inline int __attribute__((always_inline))
-__lll_robust_mutex_trylock(int *futex, int id)
-{
- return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
-}
-#define lll_robust_mutex_trylock(lock, id) \
- __lll_robust_mutex_trylock (&(lock), id)
-
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
-
-static inline void __attribute__((always_inline))
-__lll_mutex_lock (int *futex)
-{
- int val = atomic_exchange_acq (futex, 1);
-
- if (__builtin_expect (val != 0, 0))
- {
- while (atomic_exchange_acq (futex, 2) != 0)
- lll_futex_wait (futex, 2);
- }
-}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
-
-
-static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_lock (int *futex, int id)
-{
- int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex);
- return result;
-}
-#define lll_robust_mutex_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), id)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
-{
- int val = atomic_exchange_acq (futex, 2);
-
- if (__builtin_expect (val != 0, 0))
- {
- while (atomic_exchange_acq (futex, 2) != 0)
- lll_futex_wait (futex, 2);
- }
-}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
-
-
-#define lll_robust_mutex_cond_lock(futex, id) \
- __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)
-
-
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
-
-static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
-{
- int result = 0;
- int val = atomic_exchange_acq (futex, 1);
-
- if (__builtin_expect (val != 0, 0))
- result = __lll_timedlock_wait (futex, abstime);
- return result;
-}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
-
-
-static inline int __attribute__ ((always_inline))
-__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
- int id)
-{
- int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_timedlock_wait (futex, abstime);
- return result;
-}
-#define lll_robust_mutex_timedlock(futex, abstime, id) \
- __lll_robust_mutex_timedlock (&(futex), abstime, id)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
-{
- int val = atomic_exchange_rel (futex, 0);
- if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
-
-static inline void __attribute__ ((always_inline))
-__lll_robust_mutex_unlock (int *futex, int mask)
-{
- int val = atomic_exchange_rel (futex, 0);
- if (__builtin_expect (val & mask, 0))
- lll_futex_wake (futex, 1);
-}
-#define lll_robust_mutex_unlock(futex) \
- __lll_robust_mutex_unlock(&(futex), FUTEX_WAITERS)
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
-{
- (void) atomic_exchange_rel (futex, 0);
- lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+
+#define __lll_timedlock(futex, abstime, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_exchange_acq (__futex, 1), 0)) \
+ __val = __lll_timedlock_wait (__futex, abstime, private); \
+ __val; \
+ })
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
+
+
+#define __lll_robust_timedlock(futex, abstime, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
+ __val; \
+ })
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
+
+
+#define __lll_unlock(futex, private) \
+ (void) \
+ ({ int *__futex = (futex); \
+ int __oldval = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__oldval > 1, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ })
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
+
+
+#define __lll_robust_unlock(futex, private) \
+ (void) \
+ ({ int *__futex = (futex); \
+ int __oldval = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__oldval & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ })
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
+
+
+#define lll_islocked(futex) \
(futex != 0)
/* Our internal lock implementation is identical to the binary-compatible
mutex implementation. */
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
/* The states of a lock are:
0 - untaken
1 - taken by one user
>1 - taken by more users */
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -300,7 +265,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid); \
+ lll_futex_wait (&(tid), __tid, LLL_SHARED);\
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -314,26 +279,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
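
The rewritten lll_lock/lll_unlock family above is the classic three-state futex lock: 0 means untaken, 1 taken with no waiters, 2 (or greater) taken with waiters. A minimal stand-alone sketch of that protocol in plain C, using raw futex(2) calls and GCC __atomic builtins instead of the NPTL-internal atomic_* macros (function names here are invented for illustration):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* 0 = untaken, 1 = taken with no waiters, 2 = taken with waiters.  */
static void demo_lock (int *futexp)
{
  int expected = 0;
  if (!__atomic_compare_exchange_n (futexp, &expected, 1, 0,
                                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    /* Contended: store 2 ("waiters present") and sleep until the
       exchange observes the lock as free.  FUTEX_WAIT returns at once
       if the word no longer contains 2, so no wakeup can be lost.  */
    while (__atomic_exchange_n (futexp, 2, __ATOMIC_ACQUIRE) != 0)
      sys_futex (futexp, FUTEX_WAIT_PRIVATE, 2);
}

static void demo_unlock (int *futexp)
{
  /* Release; enter the kernel only if the old value showed waiters,
     the same ">1" test done by __lll_unlock above.  */
  if (__atomic_exchange_n (futexp, 0, __ATOMIC_RELEASE) > 1)
    sys_futex (futexp, FUTEX_WAKE_PRIVATE, 1);
}

The uncontended paths stay entirely in user space; the kernel is entered only when a waiter must sleep or be woken.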
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c
index c8925810c..d81ecd4e5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/pthread_once.c
@@ -27,7 +27,7 @@ clear_once_control (void *arg)
pthread_once_t *once_control = (pthread_once_t *) arg;
*once_control = 0;
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
int
@@ -66,7 +66,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
break;
/* Same generation, some other thread was faster. Wait. */
- lll_futex_wait (once_control, oldval);
+ lll_futex_wait (once_control, oldval, LLL_PRIVATE);
}
/* This thread is the first here. Do the initialization.
@@ -82,7 +82,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
*once_control = __fork_generation | 2;
/* Wake up all other threads. */
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
return 0;
}
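
For reference, the once-control protocol these hunks touch reduces to a compare-and-swap plus a futex wait/wake cycle. A simplified sketch, deliberately omitting the fork-generation bits and cancellation handling that the real __pthread_once carries (names are invented for illustration):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <limits.h>

static long sys_futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* once_control: 0 = not started, 1 = in progress, 2 = done.  */
static void demo_once (int *once_control, void (*init_routine) (void))
{
  for (;;)
    {
      if (__atomic_load_n (once_control, __ATOMIC_ACQUIRE) == 2)
        return;                         /* Some other thread finished.  */

      int expected = 0;
      if (__atomic_compare_exchange_n (once_control, &expected, 1, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE))
        {
          init_routine ();
          __atomic_store_n (once_control, 2, __ATOMIC_RELEASE);
          sys_futex (once_control, FUTEX_WAKE_PRIVATE, INT_MAX);
          return;
        }

      /* Some other thread was faster: sleep until the word changes.
         Returns immediately if it is no longer 1.  */
      sys_futex (once_control, FUTEX_WAIT_PRIVATE, 1);
    }
}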
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h
index 350d9af50..95d532802 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/sysdep-cancel.h
@@ -24,11 +24,6 @@
#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
-/* NOTE: We do mark syscalls with unwind annotations, for the benefit of
- cancellation; but they're really only accurate at the point of the
- syscall. The ARM unwind directives are not rich enough without adding
- a custom personality function. */
-
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.section ".text"; \
@@ -48,55 +43,38 @@
cmn r0, $4096; \
PSEUDO_RET; \
.Lpseudo_cancel: \
- .fnstart; \
DOCARGS_##args; /* save syscall args etc. around CENABLE. */ \
CENABLE; \
mov ip, r0; /* put mask in safe place. */ \
UNDOCARGS_##args; /* restore syscall args. */ \
- ldr r7, =SYS_ify (syscall_name); \
- swi 0x0; /* do the call. */ \
- .fnend; /* Past here we can't easily unwind. */ \
- mov r7, r0; /* save syscall return value. */ \
+ swi SYS_ify (syscall_name); /* do the call. */ \
+ str r0, [sp, $-4]!; /* save syscall return value. */ \
mov r0, ip; /* get mask back. */ \
CDISABLE; \
- mov r0, r7; /* retrieve return value. */ \
- RESTORE_LR_##args; \
+ ldmfd sp!, {r0, lr}; /* retrieve return value and address. */ \
UNDOARGS_##args; \
cmn r0, $4096;
-/* DOARGS pushes four bytes on the stack for five arguments, eight bytes for
- six arguments, and nothing for fewer. In order to preserve doubleword
- alignment, sometimes we must save an extra register. */
-
-# define RESTART_UNWIND .fnend; .fnstart; .save {r7, lr}
-
-# define DOCARGS_0 stmfd sp!, {r7, lr}; .save {r7, lr}
+# define DOCARGS_0 str lr, [sp, #-4]!;
# define UNDOCARGS_0
-# define RESTORE_LR_0 ldmfd sp!, {r7, lr};
-# define DOCARGS_1 stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8
-# define UNDOCARGS_1 ldr r0, [sp], #8; RESTART_UNWIND
-# define RESTORE_LR_1 RESTORE_LR_0
+# define DOCARGS_1 stmfd sp!, {r0, lr};
+# define UNDOCARGS_1 ldr r0, [sp], #4;
-# define DOCARGS_2 stmfd sp!, {r0, r1, r7, lr}; .save {r7, lr}; .pad #8
-# define UNDOCARGS_2 ldmfd sp!, {r0, r1}; RESTART_UNWIND
-# define RESTORE_LR_2 RESTORE_LR_0
+# define DOCARGS_2 stmfd sp!, {r0, r1, lr};
+# define UNDOCARGS_2 ldmfd sp!, {r0, r1};
-# define DOCARGS_3 stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16
-# define UNDOCARGS_3 ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND
-# define RESTORE_LR_3 RESTORE_LR_0
+# define DOCARGS_3 stmfd sp!, {r0, r1, r2, lr};
+# define UNDOCARGS_3 ldmfd sp!, {r0, r1, r2};
-# define DOCARGS_4 stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #16
-# define UNDOCARGS_4 ldmfd sp!, {r0, r1, r2, r3}; RESTART_UNWIND
-# define RESTORE_LR_4 RESTORE_LR_0
+# define DOCARGS_4 stmfd sp!, {r0, r1, r2, r3, lr};
+# define UNDOCARGS_4 ldmfd sp!, {r0, r1, r2, r3};
-# define DOCARGS_5 .save {r4}; stmfd sp!, {r0, r1, r2, r3, r4, r7, lr}; .save {r7, lr}; .pad #20
-# define UNDOCARGS_5 ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4}; .save {r7, lr}; .pad #4
-# define RESTORE_LR_5 ldmfd sp!, {r4, r7, lr}
+# define DOCARGS_5 DOCARGS_4
+# define UNDOCARGS_5 UNDOCARGS_4
-# define DOCARGS_6 .save {r4, r5}; stmfd sp!, {r0, r1, r2, r3, r7, lr}; .save {r7, lr}; .pad #20
-# define UNDOCARGS_6 ldmfd sp!, {r0, r1, r2, r3}; .fnend; .fnstart; .save {r4, r5}; .save {r7, lr}
-# define RESTORE_LR_6 RESTORE_LR_0
+# define DOCARGS_6 DOCARGS_5
+# define UNDOCARGS_6 UNDOCARGS_5
# ifdef IS_IN_libpthread
# define CENABLE bl PLTJMP(__pthread_enable_asynccancel)
@@ -151,3 +129,9 @@ extern int __local_multiple_threads attribute_hidden;
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
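
The PSEUDO wrapper above brackets the blocking system call between CENABLE and CDISABLE so that only the syscall itself acts as an asynchronous cancellation point. A rough user-level analogue of that shape, expressed with the public pthread_setcanceltype API rather than the internal __pthread_enable_asynccancel/__pthread_disable_asynccancel pair (purely illustrative, not the library's wrapper):

#include <pthread.h>
#include <unistd.h>

/* Only the blocking syscall runs with asynchronous cancellation enabled;
   everything before and after keeps the caller's original type.  */
ssize_t cancellable_read (int fd, void *buf, size_t len)
{
  int oldtype;
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);  /* CENABLE  */
  ssize_t ret = read (fd, buf, len);     /* the "swi SYS_ify(name)" step */
  pthread_setcanceltype (oldtype, NULL);                          /* CDISABLE */
  return ret;
}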
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
index 206202809..e19facfb8 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>.
@@ -18,97 +18,94 @@
Boston, MA 02111-1307, USA. */
#include <dlfcn.h>
-#include <string.h>
+#include <stdio.h>
#include <unwind.h>
-#include <unistd.h>
#include <pthreadP.h>
-#define __libc_dlopen(x) dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym dlsym
-
+static void *libgcc_s_handle;
static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
static _Unwind_Reason_Code (*libgcc_s_personality)
- (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
+ (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
static _Unwind_Reason_Code (*libgcc_s_forcedunwind)
(struct _Unwind_Exception *, _Unwind_Stop_Fn, void *);
static _Unwind_Word (*libgcc_s_getcfa) (struct _Unwind_Context *);
+static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *);
+static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);
void
+__attribute_noinline__
pthread_cancel_init (void)
{
void *resume, *personality, *forcedunwind, *getcfa;
void *handle;
+ void *sjlj_register, *sjlj_unregister;
- if (__builtin_expect (libgcc_s_getcfa != NULL, 1))
- return;
+ if (__builtin_expect (libgcc_s_handle != NULL, 1))
+ {
+ /* Force gcc to reload all values. */
+ asm volatile ("" ::: "memory");
+ return;
+ }
handle = __libc_dlopen ("libgcc_s.so.1");
if (handle == NULL
- || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
- || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL
- || (forcedunwind = __libc_dlsym (handle, "_Unwind_ForcedUnwind"))
+ || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL
+ || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL
+ || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL
+ || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL
+ || (forcedunwind = __libc_dlsym (handle, "_Unwind_SjLj_ForcedUnwind"))
== NULL
|| (getcfa = __libc_dlsym (handle, "_Unwind_GetCFA")) == NULL
-#ifdef ARCH_CANCEL_INIT
- || ARCH_CANCEL_INIT (handle)
-#endif
)
- {
-# define STR_N_LEN(str) str, strlen (str)
- INTERNAL_SYSCALL_DECL (err);
- INTERNAL_SYSCALL (write, err, 3, STDERR_FILENO,
- STR_N_LEN ("libgcc_s.so.1 must be installed for pthread_cancel to work\n"));
- abort ();
- }
+ __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
libgcc_s_resume = resume;
libgcc_s_personality = personality;
libgcc_s_forcedunwind = forcedunwind;
+ libgcc_s_sjlj_register = sjlj_register;
+ libgcc_s_sjlj_unregister = sjlj_unregister;
libgcc_s_getcfa = getcfa;
+ /* Make sure libgcc_s_getcfa is written last. Otherwise,
+ pthread_cancel_init might return early even when the pointer the
+ caller is interested in is not initialized yet. */
+ atomic_write_barrier ();
+ libgcc_s_handle = handle;
+}
+
+void
+__libc_freeres_fn_section
+__unwind_freeres (void)
+{
+ void *handle = libgcc_s_handle;
+ if (handle != NULL)
+ {
+ libgcc_s_handle = NULL;
+ __libc_dlclose (handle);
+ }
}
-/* It's vitally important that _Unwind_Resume not have a stack frame; the
- ARM unwinder relies on register state at entrance. So we write this in
- assembly. */
-
-asm (
-#ifdef __thumb__
-" .code 32"
-#endif
-" .globl _Unwind_Resume\n"
-" .type _Unwind_Resume, %function\n"
-"_Unwind_Resume:\n"
-" stmfd sp!, {r4, r5, r6, lr}\n"
-" ldr r4, 1f\n"
-" ldr r5, 2f\n"
-"3: add r4, pc, r4\n"
-" ldr r3, [r4, r5]\n"
-" mov r6, r0\n"
-" cmp r3, #0\n"
-" beq 4f\n"
-"5: mov r0, r6\n"
-" ldmfd sp!, {r4, r5, r6, lr}\n"
-" bx r3\n"
-"4: bl pthread_cancel_init\n"
-" ldr r3, [r4, r5]\n"
-" b 5b\n"
-"1: .word _GLOBAL_OFFSET_TABLE_ - 3b - 8\n"
-"2: .word libgcc_s_resume(GOTOFF)\n"
-" .size _Unwind_Resume, .-_Unwind_Resume\n"
-#ifdef __thumb__
-" .code 16"
-#endif
-);
+void
+_Unwind_Resume (struct _Unwind_Exception *exc)
+{
+ if (__builtin_expect (libgcc_s_resume == NULL, 0))
+ pthread_cancel_init ();
+
+ libgcc_s_resume (exc);
+}
_Unwind_Reason_Code
-__gcc_personality_v0 (_Unwind_State state,
- struct _Unwind_Exception *ue_header,
- struct _Unwind_Context *context)
+__gcc_personality_v0 (int version, _Unwind_Action actions,
+ _Unwind_Exception_Class exception_class,
+ struct _Unwind_Exception *ue_header,
+ struct _Unwind_Context *context)
{
if (__builtin_expect (libgcc_s_personality == NULL, 0))
pthread_cancel_init ();
- return libgcc_s_personality (state, ue_header, context);
+
+ return libgcc_s_personality (version, actions, exception_class,
+ ue_header, context);
}
_Unwind_Reason_Code
@@ -117,6 +114,7 @@ _Unwind_ForcedUnwind (struct _Unwind_Exception *exc, _Unwind_Stop_Fn stop,
{
if (__builtin_expect (libgcc_s_forcedunwind == NULL, 0))
pthread_cancel_init ();
+
return libgcc_s_forcedunwind (exc, stop, stop_argument);
}
@@ -125,5 +123,24 @@ _Unwind_GetCFA (struct _Unwind_Context *context)
{
if (__builtin_expect (libgcc_s_getcfa == NULL, 0))
pthread_cancel_init ();
+
return libgcc_s_getcfa (context);
}
+
+void
+_Unwind_SjLj_Register (struct SjLj_Function_Context *fc)
+{
+ if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0))
+ pthread_cancel_init ();
+
+ libgcc_s_sjlj_register (fc);
+}
+
+void
+_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc)
+{
+ if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0))
+ pthread_cancel_init ();
+
+ libgcc_s_sjlj_unregister (fc);
+}
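
The pattern above, resolving the unwinder entry points from libgcc_s.so.1 lazily and publishing a guard pointer last behind a write barrier, can be sketched in plain C as follows; dlopen/dlsym and stderr stand in for the internal __libc_dlopen/__libc_dlsym and __libc_fatal, and the names are invented for the example:

#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

static void *unwinder_handle;            /* published last, acts as the guard */
static void (*resume_fn) (void *exc);

static void lazy_unwinder_init (void)
{
  if (__atomic_load_n (&unwinder_handle, __ATOMIC_ACQUIRE) != NULL)
    return;                              /* already resolved */

  void *handle = dlopen ("libgcc_s.so.1", RTLD_LOCAL | RTLD_LAZY);
  void *resume = handle ? dlsym (handle, "_Unwind_SjLj_Resume") : NULL;
  if (resume == NULL)
    {
      fputs ("libgcc_s.so.1 must be installed for pthread_cancel to work\n",
             stderr);
      abort ();
    }

  resume_fn = (void (*) (void *)) resume;
  /* Store the guard pointer only after every pointer it protects,
     mirroring the atomic_write_barrier () above.  */
  __atomic_store_n (&unwinder_handle, handle, __ATOMIC_RELEASE);
}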
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
index 99b15746a..8dcfd34b1 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>.
@@ -19,77 +19,69 @@
#include <dlfcn.h>
#include <stdio.h>
-#include <stdlib.h>
#include <unwind.h>
-#define __libc_dlopen(x) dlopen(x, (RTLD_LOCAL | RTLD_LAZY))
-#define __libc_dlsym dlsym
-
static void (*libgcc_s_resume) (struct _Unwind_Exception *exc);
static _Unwind_Reason_Code (*libgcc_s_personality)
- (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *);
-
-static void init (void) __attribute_used__;
+ (int, _Unwind_Action, _Unwind_Exception_Class, struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+static void (*libgcc_s_sjlj_register) (struct SjLj_Function_Context *);
+static void (*libgcc_s_sjlj_unregister) (struct SjLj_Function_Context *);
static void
init (void)
{
void *resume, *personality;
void *handle;
+ void *sjlj_register, *sjlj_unregister;
handle = __libc_dlopen ("libgcc_s.so.1");
if (handle == NULL
- || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
- || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL) {
- fprintf(stderr, "libgcc_s.so.1 must be installed for pthread_cancel to work\n");
- abort ();
- }
+ || (sjlj_register = __libc_dlsym (handle, "_Unwind_SjLj_Register")) == NULL
+ || (sjlj_unregister = __libc_dlsym (handle, "_Unwind_SjLj_Unregister")) == NULL
+ || (resume = __libc_dlsym (handle, "_Unwind_SjLj_Resume")) == NULL
+ || (personality = __libc_dlsym (handle, "__gcc_personality_sj0")) == NULL)
+ __libc_fatal ("libgcc_s.so.1 must be installed for pthread_cancel to work\n");
libgcc_s_resume = resume;
libgcc_s_personality = personality;
+ libgcc_s_sjlj_register = sjlj_register;
+ libgcc_s_sjlj_unregister = sjlj_unregister;
}
-/* It's vitally important that _Unwind_Resume not have a stack frame; the
- ARM unwinder relies on register state at entrance. So we write this in
- assembly. */
-
-__asm__ (
-#ifdef __thumb__
-" .code 32\n"
-#endif
-" .globl _Unwind_Resume\n"
-" .type _Unwind_Resume, %function\n"
-"_Unwind_Resume:\n"
-" stmfd sp!, {r4, r5, r6, lr}\n"
-" ldr r4, 1f\n"
-" ldr r5, 2f\n"
-"3: add r4, pc, r4\n"
-" ldr r3, [r4, r5]\n"
-" mov r6, r0\n"
-" cmp r3, #0\n"
-" beq 4f\n"
-"5: mov r0, r6\n"
-" ldmfd sp!, {r4, r5, r6, lr}\n"
-" bx r3\n"
-"4: bl init\n"
-" ldr r3, [r4, r5]\n"
-" b 5b\n"
-"1: .word _GLOBAL_OFFSET_TABLE_ - 3b - 8\n"
-"2: .word libgcc_s_resume(GOTOFF)\n"
-" .size _Unwind_Resume, .-_Unwind_Resume\n"
-#ifdef __thumb__
-" .code 16\n"
-#endif
-
-);
+void
+_Unwind_Resume (struct _Unwind_Exception *exc)
+{
+ if (__builtin_expect (libgcc_s_resume == NULL, 0))
+ init ();
+ libgcc_s_resume (exc);
+}
_Unwind_Reason_Code
-__gcc_personality_v0 (_Unwind_State state,
- struct _Unwind_Exception *ue_header,
- struct _Unwind_Context *context)
+__gcc_personality_v0 (int version, _Unwind_Action actions,
+ _Unwind_Exception_Class exception_class,
+ struct _Unwind_Exception *ue_header,
+ struct _Unwind_Context *context)
{
if (__builtin_expect (libgcc_s_personality == NULL, 0))
init ();
- return libgcc_s_personality (state, ue_header, context);
+ return libgcc_s_personality (version, actions, exception_class,
+ ue_header, context);
+}
+
+void
+_Unwind_SjLj_Register (struct SjLj_Function_Context *fc)
+{
+ if (__builtin_expect (libgcc_s_sjlj_register == NULL, 0))
+ init ();
+ libgcc_s_sjlj_register (fc);
+}
+
+void
+_Unwind_SjLj_Unregister (struct SjLj_Function_Context *fc)
+{
+ if (__builtin_expect (libgcc_s_sjlj_unregister == NULL, 0))
+ init ();
+ libgcc_s_sjlj_unregister (fc);
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h
index d625fb288..eeb9cf8b6 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind.h
@@ -1,5 +1,5 @@
/* Header file for the ARM EABI unwinder
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2009 Free Software Foundation, Inc.
Contributed by Paul Brook
This file is free software; you can redistribute it and/or modify it
@@ -267,6 +267,11 @@ extern "C" {
#define _Unwind_SetIP(context, val) \
_Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1))
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)
+ (struct _Unwind_Context *, void *);
+
+extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
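
A small usage sketch for the _Unwind_Backtrace entry point declared above: walk the current stack and print one program-counter value per frame. It assumes the _Unwind_GetIP accessor from this same header; on the ARM EABI unwinder the callback keeps the walk going by returning _URC_OK (the value aliased as _URC_NO_REASON on other ports):

#include <stdio.h>
#include <unwind.h>

static _Unwind_Reason_Code print_frame (struct _Unwind_Context *ctx, void *arg)
{
  int *count = arg;
  printf ("#%d  %#lx\n", (*count)++, (unsigned long) _Unwind_GetIP (ctx));
  return _URC_OK;                 /* keep unwinding to the next frame */
}

void dump_backtrace (void)
{
  int count = 0;
  _Unwind_Backtrace (print_frame, &count);
}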
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h b/libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h
index b639ba44a..8f0df4f92 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/bits/local_lim.h
@@ -1,5 +1,5 @@
/* Minimum guaranteed maximum values for system limits. Linux version.
- Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+ Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
#ifndef OPEN_MAX
# define __undef_OPEN_MAX
#endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
/* The kernel sources contain a file with all the needed information. */
#include <linux/limits.h>
@@ -50,6 +53,11 @@
# undef OPEN_MAX
# undef __undef_OPEN_MAX
#endif
+/* Have to remove ARG_MAX? */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
/* The number of data keys per process. */
#define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
/* Maximum message queue priority level. */
#define MQ_PRIO_MAX 32768
+
+/* Maximum value the semaphore can have. */
+#define SEM_VALUE_MAX (2147483647)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h b/libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h
index 92c2d3282..2550355cd 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/bits/posix_opt.h
@@ -1,5 +1,5 @@
/* Define POSIX options for Linux.
- Copyright (C) 1996-2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1996-2004, 2006, 2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,8 +17,8 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#ifndef _POSIX_OPT_H
-#define _POSIX_OPT_H 1
+#ifndef _BITS_POSIX_OPT_H
+#define _BITS_POSIX_OPT_H 1
/* Job control is supported. */
#define _POSIX_JOB_CONTROL 1
@@ -27,28 +27,28 @@
#define _POSIX_SAVED_IDS 1
/* Priority scheduling is supported. */
-#define _POSIX_PRIORITY_SCHEDULING 200112L
+#define _POSIX_PRIORITY_SCHEDULING 200809L
/* Synchronizing file data is supported. */
-#define _POSIX_SYNCHRONIZED_IO 200112L
+#define _POSIX_SYNCHRONIZED_IO 200809L
/* The fsync function is present. */
-#define _POSIX_FSYNC 200112L
+#define _POSIX_FSYNC 200809L
/* Mapping of files to memory is supported. */
-#define _POSIX_MAPPED_FILES 200112L
+#define _POSIX_MAPPED_FILES 200809L
/* Locking of all memory is supported. */
-#define _POSIX_MEMLOCK 200112L
+#define _POSIX_MEMLOCK 200809L
/* Locking of ranges of memory is supported. */
-#define _POSIX_MEMLOCK_RANGE 200112L
+#define _POSIX_MEMLOCK_RANGE 200809L
/* Setting of memory protections is supported. */
-#define _POSIX_MEMORY_PROTECTION 200112L
+#define _POSIX_MEMORY_PROTECTION 200809L
-/* Only root can change owner of file. */
-#define _POSIX_CHOWN_RESTRICTED 1
+/* Some filesystems allow all users to change file ownership. */
+#define _POSIX_CHOWN_RESTRICTED 0
/* `c_cc' member of 'struct termios' structure can be disabled by
using the value _POSIX_VDISABLE. */
@@ -60,38 +60,56 @@
/* X/Open realtime support is available. */
#define _XOPEN_REALTIME 1
+/* X/Open thread realtime support is available. */
+#define _XOPEN_REALTIME_THREADS 1
+
/* XPG4.2 shared memory is supported. */
#define _XOPEN_SHM 1
/* Tell we have POSIX threads. */
-#define _POSIX_THREADS 200112L
+#define _POSIX_THREADS 200809L
/* We have the reentrant functions described in POSIX. */
#define _POSIX_REENTRANT_FUNCTIONS 1
-#define _POSIX_THREAD_SAFE_FUNCTIONS 200112L
+#define _POSIX_THREAD_SAFE_FUNCTIONS 200809L
/* We provide priority scheduling for threads. */
-#define _POSIX_THREAD_PRIORITY_SCHEDULING 200112L
+#define _POSIX_THREAD_PRIORITY_SCHEDULING 200809L
/* We support user-defined stack sizes. */
-#define _POSIX_THREAD_ATTR_STACKSIZE 200112L
+#define _POSIX_THREAD_ATTR_STACKSIZE 200809L
/* We support user-defined stacks. */
-#define _POSIX_THREAD_ATTR_STACKADDR 200112L
+#define _POSIX_THREAD_ATTR_STACKADDR 200809L
+
+/* We support priority inheritance. */
+#define _POSIX_THREAD_PRIO_INHERIT 200809L
+
+/* We support priority protection, though only for non-robust
+ mutexes. */
+#define _POSIX_THREAD_PRIO_PROTECT 200809L
+
+#ifdef __USE_XOPEN2K8
+/* We support priority inheritance for robust mutexes. */
+# define _POSIX_THREAD_ROBUST_PRIO_INHERIT 200809L
+
+/* We do not support priority protection for robust mutexes. */
+# define _POSIX_THREAD_ROBUST_PRIO_PROTECT -1
+#endif
/* We support POSIX.1b semaphores. */
-#define _POSIX_SEMAPHORES 200112L
+#define _POSIX_SEMAPHORES 200809L
/* Real-time signals are supported. */
-#define _POSIX_REALTIME_SIGNALS 200112L
+#define _POSIX_REALTIME_SIGNALS 200809L
/* We support asynchronous I/O. */
-#define _POSIX_ASYNCHRONOUS_IO 200112L
+#define _POSIX_ASYNCHRONOUS_IO 200809L
#define _POSIX_ASYNC_IO 1
/* Alternative name for Unix98. */
#define _LFS_ASYNCHRONOUS_IO 1
/* Support for prioritization is also available. */
-#define _POSIX_PRIORITIZED_IO 200112L
+#define _POSIX_PRIORITIZED_IO 200809L
/* The LFS support in asynchronous I/O is also available. */
#define _LFS64_ASYNCHRONOUS_IO 1
@@ -102,7 +120,7 @@
#define _LFS64_STDIO 1
/* POSIX shared memory objects are implemented. */
-#define _POSIX_SHARED_MEMORY_OBJECTS 200112L
+#define _POSIX_SHARED_MEMORY_OBJECTS 200809L
/* CPU-time clocks support needs to be checked at runtime. */
#define _POSIX_CPUTIME 0
@@ -114,49 +132,49 @@
#define _POSIX_REGEXP 1
/* Reader/Writer locks are available. */
-#define _POSIX_READER_WRITER_LOCKS 200112L
+#define _POSIX_READER_WRITER_LOCKS 200809L
/* We have a POSIX shell. */
#define _POSIX_SHELL 1
/* We support the Timeouts option. */
-#define _POSIX_TIMEOUTS 200112L
+#define _POSIX_TIMEOUTS 200809L
/* We support spinlocks. */
-#define _POSIX_SPIN_LOCKS 200112L
+#define _POSIX_SPIN_LOCKS 200809L
/* The `spawn' function family is supported. */
-#define _POSIX_SPAWN 200112L
+#define _POSIX_SPAWN 200809L
/* We have POSIX timers. */
-#define _POSIX_TIMERS 200112L
+#define _POSIX_TIMERS 200809L
/* The barrier functions are available. */
-#define _POSIX_BARRIERS 200112L
+#define _POSIX_BARRIERS 200809L
/* POSIX message queues are available. */
-#define _POSIX_MESSAGE_PASSING 200112L
+#define _POSIX_MESSAGE_PASSING 200809L
/* Thread process-shared synchronization is supported. */
-#define _POSIX_THREAD_PROCESS_SHARED 200112L
+#define _POSIX_THREAD_PROCESS_SHARED 200809L
/* The monotonic clock might be available. */
#define _POSIX_MONOTONIC_CLOCK 0
/* The clock selection interfaces are available. */
-#define _POSIX_CLOCK_SELECTION 200112L
+#define _POSIX_CLOCK_SELECTION 200809L
/* Advisory information interfaces are available. */
-#define _POSIX_ADVISORY_INFO 200112L
+#define _POSIX_ADVISORY_INFO 200809L
/* IPv6 support is available. */
-#define _POSIX_IPV6 200112L
+#define _POSIX_IPV6 200809L
/* Raw socket support is available. */
-#define _POSIX_RAW_SOCKETS 200112L
+#define _POSIX_RAW_SOCKETS 200809L
/* We have at least one terminal. */
-#define _POSIX2_CHAR_TERM 200112L
+#define _POSIX2_CHAR_TERM 200809L
/* Neither process nor thread sporadic server interfaces is available. */
#define _POSIX_SPORADIC_SERVER -1
@@ -171,8 +189,4 @@
/* Typed memory objects are not available. */
#define _POSIX_TYPED_MEMORY_OBJECTS -1
-/* No support for priority inheritance or protection so far. */
-#define _POSIX_THREAD_PRIO_INHERIT -1
-#define _POSIX_THREAD_PRIO_PROTECT -1
-
-#endif /* posix_opt.h */
+#endif /* bits/posix_opt.h */
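
Applications typically consume these option macros at compile time. For instance, the newly advertised _POSIX_THREAD_PRIO_INHERIT can gate a request for a priority-inheritance mutex; a short sketch using the standard pthread_mutexattr_setprotocol API (the function name is made up here):

#include <unistd.h>
#include <pthread.h>

#if defined _POSIX_THREAD_PRIO_INHERIT && _POSIX_THREAD_PRIO_INHERIT > 0
int make_pi_mutex (pthread_mutex_t *m)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  /* Ask for priority inheritance, now that the option is advertised.  */
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  int ret = pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);
  return ret;
}
#endif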
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/fork.c
index a84b5c237..2d4cae224 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/fork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/fork.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -58,8 +58,9 @@ fresetlockfiles (void)
#endif
}
-extern __typeof(fork) __libc_fork;
-pid_t __libc_fork (void)
+
+pid_t
+__libc_fork (void)
{
pid_t pid;
struct used_handler
@@ -73,6 +74,9 @@ pid_t __libc_fork (void)
struct fork_handler *runp;
while ((runp = __fork_handlers) != NULL)
{
+ /* Make sure we read from the current RUNP pointer. */
+ atomic_full_barrier ();
+
unsigned int oldval = runp->refcntr;
if (oldval == 0)
@@ -166,6 +170,8 @@ pid_t __libc_fork (void)
/* Reset locks in the I/O code. */
STDIO_INIT_MUTEX(_stdio_openlist_add_lock);
+ /* XXX reset any locks in dynamic loader */
+
/* Run the handlers registered for the child. */
while (allp != NULL)
{
@@ -173,8 +179,11 @@ pid_t __libc_fork (void)
allp->handler->child_handler ();
/* Note that we do not have to wake any possible waiter.
- This is the only thread in the new process. */
- --allp->handler->refcntr;
+ This is the only thread in the new process. The count
+ may have been bumped up by other threads doing a fork.
+ We reset it to 1, to avoid waiting for non-existing
+ thread(s) to release the count. */
+ allp->handler->refcntr = 1;
/* XXX We could at this point look through the object pool
and mark all objects not on the __fork_handlers list as
@@ -186,7 +195,7 @@ pid_t __libc_fork (void)
}
/* Initialize the fork lock. */
- __fork_lock = (lll_lock_t) LLL_LOCK_INITIALIZER;
+ __fork_lock = LLL_LOCK_INITIALIZER;
}
else
{
@@ -206,7 +215,7 @@ pid_t __libc_fork (void)
if (atomic_decrement_and_test (&allp->handler->refcntr)
&& allp->handler->need_signal)
- lll_futex_wake (allp->handler->refcntr, 1);
+ lll_futex_wake (allp->handler->refcntr, 1, LLL_PRIVATE);
allp = allp->next;
}
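
The child-side reset to refcntr = 1 works because only the forking thread exists in the new process. Application code registered through pthread_atfork follows the same reasoning: the prepare handler takes the lock, and both parent and child handlers release it, so nothing in the child is left owned by a thread that no longer exists. A minimal sketch:

#include <pthread.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_prepare (void) { pthread_mutex_lock (&log_lock); }
static void log_parent (void)  { pthread_mutex_unlock (&log_lock); }
static void log_child (void)   { pthread_mutex_unlock (&log_lock); }

__attribute__ ((constructor))
static void install_fork_handlers (void)
{
  pthread_atfork (log_prepare, log_parent, log_child);
}

State that counts other threads, like the handler refcounts above, cannot be released this way and is simply reset in the child.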
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/fork.h b/libpthread/nptl/sysdeps/unix/sysv/linux/fork.h
index bcdf6217c..a00cfabe2 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/fork.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/fork.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -26,7 +26,7 @@ extern unsigned long int __fork_generation attribute_hidden;
extern unsigned long int *__fork_generation_pointer attribute_hidden;
/* Lock to protect allocation and deallocation of fork handlers. */
-extern lll_lock_t __fork_lock attribute_hidden;
+extern int __fork_lock attribute_hidden;
/* Elements of the fork handler lists. */
struct fork_handler
@@ -41,7 +41,7 @@ struct fork_handler
};
/* The single linked list of all currently registered for handlers. */
-extern struct fork_handler *__fork_handlers;
+extern struct fork_handler *__fork_handlers attribute_hidden;
/* Function to call to unregister fork handlers. */
@@ -54,3 +54,7 @@ extern int __register_atfork (void (*__prepare) (void),
void (*__parent) (void),
void (*__child) (void),
void *dso_handle);
+libc_hidden_proto (__register_atfork)
+
+/* Add a new element to the fork list. */
+extern void __linkin_atfork (struct fork_handler *newp) attribute_hidden;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c b/libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c
index 9a4f51c9d..96e2bf439 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/getpid.c
@@ -22,11 +22,11 @@
#include <sysdep.h>
-extern __typeof(getpid) __getpid;
#ifndef NOT_IN_libc
-static __always_inline pid_t really_getpid (pid_t oldval);
+static inline __attribute__((always_inline)) pid_t really_getpid (pid_t oldval);
-static __always_inline pid_t really_getpid (pid_t oldval)
+static inline __attribute__((always_inline)) pid_t
+really_getpid (pid_t oldval)
{
if (__builtin_expect (oldval == 0, 1))
{
@@ -46,7 +46,8 @@ static __always_inline pid_t really_getpid (pid_t oldval)
}
#endif
-pid_t __getpid (void)
+pid_t
+__getpid (void)
{
#ifdef NOT_IN_libc
INTERNAL_SYSCALL_DECL (err);
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
index 740ee7fcb..9bb19381c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/Makefile.arch
@@ -15,7 +15,7 @@ libc_a_CSRC = fork.c
libc_a_SSRC = clone.S vfork.S
libpthread_SSRC += i486/lowlevellock.S i486/pthread_barrier_wait.S i486/pthread_cond_signal.S i486/pthread_cond_broadcast.S \
- i486/sem_post.S i486/sem_timedwait.S \
+ i486/lowlevelrobustlock.S i486/sem_post.S i486/sem_timedwait.S \
i486/sem_trywait.S i486/sem_wait.S i486/pthread_rwlock_rdlock.S i486/pthread_rwlock_wrlock.S \
i486/pthread_rwlock_timedrdlock.S i486/pthread_rwlock_timedwrlock.S i486/pthread_rwlock_unlock.S
#i486/pthread_cond_timedwait.S i486/pthread_cond_wait.S
@@ -31,6 +31,7 @@ endif
ASFLAGS-pt-vfork.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevelrobustlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-pthread_once.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-pthread_spin_unlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
index 0ec6e5534..9e3e016fb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/pthreadtypes.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -128,7 +128,10 @@ typedef union
unsigned int __nr_writers_queued;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
int __writer;
} __data;
char __size[__SIZEOF_PTHREAD_RWLOCK_T];
@@ -165,6 +168,6 @@ typedef union
/* Extra attributes for the cleanup functions. */
-#define __cleanup_fct_attribute __attribute ((regparm (1)))
+#define __cleanup_fct_attribute __attribute__ ((__regparm__ (1)))
#endif /* bits/pthreadtypes.h */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
index e6c5d845c..934493c30 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/bits/semaphore.h
@@ -28,9 +28,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
index b874538b3..813e5299a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/fork.c
@@ -19,10 +19,10 @@
#include <sched.h>
#include <signal.h>
-#include <stdio.h>
#include <sysdep.h>
#include <tls.h>
+
#define ARCH_FORK() \
INLINE_SYSCALL (clone, 5, \
CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID | SIGCHLD, 0, \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
index 223b11108..ce8ad27aa 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -17,14 +17,4 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-/* In libc.so we do not unconditionally use the lock prefix. Only if
- the application is using threads. */
-#ifndef UP
-# define LOCK \
- cmpl $0, %gs:MULTIPLE_THREADS_OFFSET; \
- je,pt 0f; \
- lock; \
-0:
-#endif
-
#include "lowlevellock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
index 955e119ab..61255a0af 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,34 +19,74 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
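
At the C level, the LOAD_PRIVATE_FUTEX_* / LOAD_FUTEX_* macros above simply compose the futex operation word: the base operation, plus FUTEX_PRIVATE_FLAG when the futex is process-private and the kernel understands the flag. A sketch of that computation, where kernel_has_private_futex stands in for the per-thread PRIVATE_FUTEX field read through %gs:

#include <linux/futex.h>

static int futex_op (int base_op, int is_private, int kernel_has_private_futex)
{
  int op = base_op;                       /* FUTEX_WAIT or FUTEX_WAKE */
  if (is_private && kernel_has_private_futex)
    op |= FUTEX_PRIVATE_FLAG;             /* stay out of the global futex hash */
  return op;
}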
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+ cfi_startproc
pushl %edx
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
movl $2, %edx
movl %ecx, %ebx
xorl %esi, %esi /* No timeout. */
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ LOAD_PRIVATE_FUTEX_WAIT (%ecx)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -58,41 +98,162 @@ __lll_mutex_lock_wait:
xchgl %eax, (%ebx) /* NB: lock is implied */
testl %eax, %eax
- jnz,pn 1b
+ jnz 1b
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
ret
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
+ .align 16
+__lll_lock_wait:
+ cfi_startproc
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
+
+ movl %edx, %ebx
+ movl $2, %edx
+ xorl %esi, %esi /* No timeout. */
+ LOAD_FUTEX_WAIT (%ecx)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+2: movl %edx, %eax
+ xchgl %eax, (%ebx) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ ret
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* %ecx: futex
+ %esi: flags
+ %edx: timeout
+ %eax: futex value
+ */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+ cfi_startproc
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ LOAD_PIC_REG (bx)
+ cmpl $0, __have_futex_clock_realtime@GOTOFF(%ebx)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ movl %ecx, %ebx
+ movl %esi, %ecx
+ movl %edx, %esi
+ movl $0xffffffff, %ebp
+ LOAD_FUTEX_WAIT_ABS (%ecx)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ ENTER_KERNEL
+
+2: xchgl %edx, (%ebx) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+7: popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ ret
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpl $1000000000, 4(%edx)
jae 3f
- pushl %edi
pushl %esi
- pushl %ebx
- pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
+ pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
/* Stack frame for the timespec and timeval structs. */
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl %ecx, %ebp
movl %edx, %edi
+ movl $2, %edx
+ xchgl %edx, (%ebp)
+
+ test %edx, %edx
+ je 6f
+
1:
/* Get current time. */
movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -107,116 +268,128 @@ __lll_mutex_timedlock_wait:
addl $1000000000, %edx
subl $1, %ecx
4: testl %ecx, %ecx
- js 5f /* Time is already up. */
+ js 2f /* Time is already up. */
/* Store relative timeout. */
movl %ecx, (%esp)
movl %edx, 4(%esp)
+ /* Futex call. */
movl %ebp, %ebx
-
- movl $1, %eax
movl $2, %edx
- LOCK
- cmpxchgl %edx, (%ebx)
-
- testl %eax, %eax
- je 8f
-
- /* Futex call. */
movl %esp, %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+ movl 16(%esp), %ecx
+ LOAD_FUTEX_WAIT (%ecx)
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
-8: /* NB: %edx == 2 */
- xorl %eax, %eax
- LOCK
- cmpxchgl %edx, (%ebx)
+ /* NB: %edx == 2 */
+ xchgl %edx, (%ebp)
- jnz 7f
+ testl %edx, %edx
+ je 6f
+
+ cmpl $-ETIMEDOUT, %eax
+ jne 1b
+2: movl $ETIMEDOUT, %edx
6: addl $8, %esp
- popl %ebp
- popl %ebx
- popl %esi
+ cfi_adjust_cfa_offset(-8)
popl %edi
- ret
-
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- je 5f
-
- /* Make sure the current holder knows we are going to sleep. */
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+7: popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
movl %edx, %eax
- xchgl %eax, (%ebx)
- testl %eax, %eax
- jz 6b
- jmp 1b
-
-3: movl $EINVAL, %eax
ret
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+3: movl $EINVAL, %edx
+ jmp 7b
+# endif
+ cfi_endproc
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-
-#ifdef NOT_IN_libc
- .globl lll_unlock_wake_cb
- .type lll_unlock_wake_cb,@function
- .hidden lll_unlock_wake_cb
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ecx
+ cfi_adjust_cfa_offset(4)
pushl %edx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%ecx, -12)
+ cfi_offset(%edx, -16)
- movl 20(%esp), %ebx
- LOCK
- subl $1, (%ebx)
- je 1f
-
- movl $FUTEX_WAKE, %ecx
+ movl %eax, %ebx
+ movl $0, (%eax)
+ LOAD_PRIVATE_FUTEX_WAKE (%ecx)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
- movl $0, (%ebx)
ENTER_KERNEL
-1: popl %edx
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
popl %ecx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ecx)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ecx
+ cfi_adjust_cfa_offset(4)
pushl %edx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%ecx, -12)
+ cfi_offset(%edx, -16)
movl %eax, %ebx
movl $0, (%eax)
- movl $FUTEX_WAKE, %ecx
+ LOAD_FUTEX_WAKE (%ecx)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
ENTER_KERNEL
popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
popl %ecx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ecx)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
@@ -234,7 +407,7 @@ __lll_timedwait_tid:
/* Get current time. */
2: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -259,6 +432,8 @@ __lll_timedwait_tid:
jz 4f
movl %esp, %esi
+ /* XXX The kernel so far uses global futex for the wakeup at
+ all times. */
xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
movl %ebp, %ebx
movl $SYS_futex, %eax
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
new file mode 100644
index 000000000..596763444
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
@@ -0,0 +1,233 @@
+/* Copyright (C) 2002, 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+
+ .text
+
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %gs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+#endif
+
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
+ .align 16
+__lll_robust_lock_wait:
+ cfi_startproc
+ pushl %edx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edx, -8)
+ cfi_offset(%ebx, -12)
+ cfi_offset(%esi, -16)
+
+ movl %edx, %ebx
+ xorl %esi, %esi /* No timeout. */
+ LOAD_FUTEX_WAIT (%ecx)
+
+4: movl %eax, %edx
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 3f
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ je 1f
+
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 2f
+
+1: movl $SYS_futex, %eax
+ ENTER_KERNEL
+
+ movl (%ebx), %eax
+
+2: test %eax, %eax
+ jne 4b
+
+ movl %gs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 4b
+ /* NB: %eax == 0 */
+
+3: popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %edx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edx)
+ ret
+ cfi_endproc
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
+ .align 16
+__lll_robust_timedlock_wait:
+ cfi_startproc
+ /* Check for a valid timeout value. */
+ cmpl $1000000000, 4(%edx)
+ jae 3f
+
+ pushl %edi
+ cfi_adjust_cfa_offset(4)
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%edi, -8)
+ cfi_offset(%esi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
+
+ /* Stack frame for the timespec and timeval structs. */
+ subl $12, %esp
+ cfi_adjust_cfa_offset(12)
+
+ movl %ecx, %ebp
+ movl %edx, %edi
+
+1: movl %eax, 8(%esp)
+
+ /* Get current time. */
+ movl %esp, %ebx
+ xorl %ecx, %ecx
+ movl $__NR_gettimeofday, %eax
+ ENTER_KERNEL
+
+ /* Compute relative timeout. */
+ movl 4(%esp), %eax
+ movl $1000, %edx
+ mul %edx /* Microseconds to nanoseconds. */
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ subl (%esp), %ecx
+ subl %eax, %edx
+ jns 4f
+ addl $1000000000, %edx
+ subl $1, %ecx
+4: testl %ecx, %ecx
+ js 8f /* Time is already up. */
+
+ /* Store relative timeout. */
+ movl %ecx, (%esp)
+ movl %edx, 4(%esp)
+
+ movl %ebp, %ebx
+
+ movl 8(%esp), %edx
+ movl %edx, %eax
+ orl $FUTEX_WAITERS, %edx
+
+ testl $FUTEX_OWNER_DIED, %eax
+ jnz 6f
+
+ cmpl %eax, %edx
+ je 2f
+
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ movl $0, %ecx /* Must use mov to avoid changing cc. */
+ jnz 5f
+
+2:
+ /* Futex call. */
+ movl %esp, %esi
+ movl 20(%esp), %ecx
+ LOAD_FUTEX_WAIT (%ecx)
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ movl %eax, %ecx
+
+ movl (%ebx), %eax
+
+5: testl %eax, %eax
+ jne 7f
+
+ movl %gs:TID, %edx
+ orl $FUTEX_WAITERS, %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jnz 7f
+
+6: addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ ret
+
+3: movl $EINVAL, %eax
+ ret
+
+ cfi_adjust_cfa_offset(28)
+ cfi_offset(%edi, -8)
+ cfi_offset(%esi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
+ /* Check whether the time expired. */
+7: cmpl $-ETIMEDOUT, %ecx
+ jne 1b
+
+8: movl $ETIMEDOUT, %eax
+ jmp 6b
+ cfi_endproc
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
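
The new robust slow path keeps the owner's TID in the futex word, sets FUTEX_WAITERS before sleeping, and bails out if the kernel has flagged the owner as dead. A simplified C sketch of that acquisition loop, leaving out the EAGAIN/timeout handling and robust-list bookkeeping of the real code (names are invented for illustration):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_WAITERS
# define FUTEX_WAITERS    0x80000000
# define FUTEX_OWNER_DIED 0x40000000
# define FUTEX_TID_MASK   0x3fffffff
#endif

static long sys_futex (unsigned int *uaddr, int op, unsigned int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Returns 0 once the lock is held, or FUTEX_OWNER_DIED if the previous
   owner died holding it and the caller must recover the state.  */
static unsigned int robust_lock_wait (unsigned int *futexp, unsigned int tid)
{
  for (;;)
    {
      unsigned int val = __atomic_load_n (futexp, __ATOMIC_ACQUIRE);

      if (val & FUTEX_OWNER_DIED)
        return FUTEX_OWNER_DIED;

      if ((val & FUTEX_TID_MASK) == 0)
        {
          /* Free: claim it with our TID, keeping any waiters bit.  */
          unsigned int newval = tid | (val & FUTEX_WAITERS);
          if (__atomic_compare_exchange_n (futexp, &val, newval, 0,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return 0;
          continue;
        }

      /* Held by someone else: make sure FUTEX_WAITERS is set, then sleep
         until the word changes.  */
      unsigned int withwaiters = val | FUTEX_WAITERS;
      if (val != withwaiters
          && !__atomic_compare_exchange_n (futexp, &val, withwaiters, 0,
                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED))
        continue;

      sys_futex (futexp, FUTEX_WAIT, withwaiters);
    }
}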
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
index 2af9e38cd..040d7f8c3 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,25 +18,19 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_barrier_wait
.type pthread_barrier_wait,@function
.align 16
pthread_barrier_wait:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
movl 8(%esp), %ebx
@@ -54,6 +48,8 @@ pthread_barrier_wait:
/* There are more threads to come. */
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
#if CURR_EVENT == 0
movl (%ebx), %edx
@@ -68,7 +64,13 @@ pthread_barrier_wait:
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
-7: xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+7:
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
xorl %esi, %esi
8: movl $SYS_futex, %eax
ENTER_KERNEL
@@ -81,7 +83,7 @@ pthread_barrier_wait:
#else
cmpl %edx, CURR_EVENT(%ebx)
#endif
- je,pn 8b
+ je 8b
/* Increment LEFT. If this brings the count back to the
initial count unlock the object. */
@@ -91,7 +93,7 @@ pthread_barrier_wait:
xaddl %edx, LEFT(%ebx)
subl $1, %ecx
cmpl %ecx, %edx
- jne,pt 10f
+ jne 10f
/* Release the mutex. We cannot release the lock before
waking the waiting threads since otherwise a new thread might
@@ -104,9 +106,16 @@ pthread_barrier_wait:
10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+
/* The necessary number of threads arrived. */
3:
#if CURR_EVENT == 0
@@ -119,6 +128,7 @@ pthread_barrier_wait:
so 0x7fffffff is the highest value. */
movl $0x7fffffff, %edx
movl $FUTEX_WAKE, %ecx
+ orl PRIVATE(%ebx), %ecx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -130,7 +140,7 @@ pthread_barrier_wait:
xaddl %edx, LEFT(%ebx)
subl $1, %ecx
cmpl %ecx, %edx
- jne,pt 5f
+ jne 5f
/* Release the mutex. We cannot release the lock before
waking the waiting threads since otherwise a new thread might
@@ -142,21 +152,36 @@ pthread_barrier_wait:
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
-1: leal MUTEX(%ebx), %ecx
- call __lll_mutex_lock_wait
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+1: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
+ xorl $LLL_SHARED, %ecx
+ call __lll_lock_wait
jmp 2b
-4: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+4: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 5b
-6: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
+6: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 7b
-9: leal MUTEX(%ebx), %eax
- call __lll_mutex_unlock_wake
+9: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
jmp 10b
+ cfi_endproc
.size pthread_barrier_wait,.-pthread_barrier_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
index 6e8ffe6f6..669b96a95 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2006,2007,2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,22 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
-
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
.text
@@ -42,11 +31,20 @@
.type __pthread_cond_broadcast, @function
.align 16
__pthread_cond_broadcast:
-
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+ cfi_remember_state
movl 20(%esp), %ebx
@@ -92,8 +90,24 @@ __pthread_cond_broadcast:
8: cmpl $-1, %edi
je 9f
+ /* Do not use requeue for pshared condvars. */
+ testl $PS_BIT, MUTEX_KIND(%edi)
+ jne 9f
+
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ je 81f
+
/* Wake up all threads. */
- movl $FUTEX_CMP_REQUEUE, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx
+#else
+ movl %gs:PRIVATE_FUTEX, %ecx
+ orl $FUTEX_CMP_REQUEUE, %ecx
+#endif
movl $SYS_futex, %eax
movl $0x7fffffff, %esi
movl $1, %edx
@@ -111,51 +125,113 @@ __pthread_cond_broadcast:
cmpl $0xfffff001, %eax
jae 9f
-10: xorl %eax, %eax
+6: xorl %eax, %eax
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
- .align 16
- /* Unlock. */
-4: LOCK
- subl $1, cond_lock-cond_futex(%ebx)
- jne 5f
+ cfi_restore_state
-6: xorl %eax, %eax
- popl %ebp
- popl %edi
- popl %esi
- popl %ebx
- ret
+81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl $SYS_futex, %eax
+ movl $0x7fffffff, %esi
+ movl $1, %edx
+ /* Get the address of the futex involved. */
+# if MUTEX_FUTEX != 0
+ addl $MUTEX_FUTEX, %edi
+# endif
+ int $0x80
+
+ /* For any kind of error, which mainly is EAGAIN, we try again
+ with WAKE. The general test also covers running on old
+ kernels. */
+ cmpl $0xfffff001, %eax
+ jb 6b
+ jmp 9f
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires waekup. */
+ .align 16
+ /* Unlock. */
+4: LOCK
+ subl $1, cond_lock-cond_futex(%ebx)
+ je 6b
+
+ /* Unlock in loop requires wakeup. */
5: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 6b
- /* Unlock in loop requires waekup. */
+ /* Unlock in loop requires wakeup. */
7: leal cond_lock-cond_futex(%ebx), %eax
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 8b
9: /* The futex requeue functionality is not available. */
movl $0x7fffffff, %edx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
ENTER_KERNEL
- jmp 10b
+ jmp 6b
+ cfi_endproc
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
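
Note on the pattern above: every call to __lll_lock_wait/__lll_unlock_wake is now preceded by the cmpl $-1, dep_mutex / setne %cl / subl $1, %ecx / andl $(LLL_SHARED-LLL_PRIVATE), %ecx sequence, which selects the private or process-shared low-level-lock flavour without a branch (dep_mutex == -1 marks a process-shared condvar). A rough C equivalent, with placeholder constant values that are assumptions for illustration only:

#define LLL_PRIVATE 0     /* placeholder values, not taken from this patch */
#define LLL_SHARED  128

static int choose_lll_flavour(const void *dep_mutex)
{
    /* setne %cl: 1 when dep_mutex != (void *) -1, i.e. the condvar is
       process-private and may use private futexes.  */
    unsigned int is_private = (dep_mutex != (const void *) -1);
    /* subl $1, %ecx: 0 for private, 0xffffffff for process-shared.  */
    unsigned int mask = is_private - 1;
    /* andl/addl: LLL_PRIVATE for private condvars, LLL_SHARED otherwise.  */
    return (int) ((mask & (LLL_SHARED - LLL_PRIVATE)) + LLL_PRIVATE);
}
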
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
index ec8217987..54e80d059 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2004,2005,2007,2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,20 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-
-#define EINVAL 22
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
.text
@@ -42,8 +33,14 @@
.align 16
__pthread_cond_signal:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
+ cfi_remember_state
movl 12(%esp), %edi
@@ -77,35 +74,141 @@ __pthread_cond_signal:
addl $1, (%ebx)
/* Wake up one thread. */
- movl $FUTEX_WAKE, %ecx
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
+
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ je 8f
+
+ movl dep_mutex-cond_futex(%ebx), %edx
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edx), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ je 9f
+
+8: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE_OP, %ecx
movl $SYS_futex, %eax
movl $1, %edx
+ movl $1, %esi
+ movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
+ /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+ sysenter.
+ ENTER_KERNEL */
+ int $0x80
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
+ popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpl $-4095, %eax
+ jae 7f
+
+6: xorl %eax, %eax
+ popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ ret
+
+ cfi_restore_state
+
+9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ xorl %esi, %esi
+ movl dep_mutex-cond_futex(%ebx), %edi
+ movl (%ebx), %ebp
+ /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
+ sysenter.
+ ENTER_KERNEL */
+ int $0x80
+ popl %ebp
+ popl %esi
+
+ leal -cond_futex(%ebx), %edi
+
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpl $-4095, %eax
+ jb 4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ orl $FUTEX_WAKE, %ecx
+
+ xorl $(FUTEX_WAKE ^ FUTEX_WAKE_OP), %ecx
+ movl $SYS_futex, %eax
+ /* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
+ movl $1, %edx */
ENTER_KERNEL
/* Unlock. Note that at this point %edi always points to
cond_lock. */
4: LOCK
subl $1, (%edi)
- jne 5f
+ je 6b
-6: xorl %eax, %eax
- popl %edi
- popl %ebx
- ret
+ /* Unlock in loop requires wakeup. */
+5: movl %edi, %eax
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+ jmp 6b
/* Initial locking failed. */
1:
#if cond_lock == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal cond_lock(%edi), %ecx
+ leal cond_lock(%edi), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%edi)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires wakeup. */
-5: movl %edi, %eax
- call __lll_mutex_unlock_wake
- jmp 6b
+ cfi_endproc
.size __pthread_cond_signal, .-__pthread_cond_signal
weak_alias(__pthread_cond_signal, pthread_cond_signal)
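
The signal path above now wakes a waiter with FUTEX_WAKE_OP so the kernel can release the condvar's internal lock in the same system call, and it falls back to plain FUTEX_WAKE (label 7) when that operation fails, e.g. on kernels that lack it. A hedged C sketch of the fast path, using the standard <linux/futex.h> interface; the field names and the assumption that FUTEX_OP_CLEAR_WAKE_IF_GT_ONE expands to the FUTEX_OP() encoding shown here are illustrative, not taken from this patch:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wake one waiter on cond_futex; let the kernel also set *cond_lock to 0 and
   wake one waiter on it if the old lock value was > 1 (waiters present).  */
static void signal_wake_one(int *cond_futex, int *cond_lock, int op_flags)
{
    long r = syscall(SYS_futex, cond_futex, FUTEX_WAKE_OP | op_flags,
                     1,              /* wake at most one waiter on cond_futex */
                     1,              /* wake at most one waiter on cond_lock  */
                     cond_lock,
                     FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
    if (r < 0)                       /* ENOSYS/EINVAL on old kernels, EAGAIN */
        syscall(SYS_futex, cond_futex, FUTEX_WAKE | op_flags, 1);
}
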
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
index b8f0d2e4b..c56dd7716 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
.text
@@ -40,14 +34,28 @@
.align 16
__pthread_cond_timedwait:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
pushl %ebp
-.Lpush_ebp:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
pushl %edi
-.Lpush_edi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %esi
-.Lpush_esi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %ebx
-.Lpush_ebx:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
movl 20(%esp), %ebx
movl 28(%esp), %ebp
@@ -84,11 +92,12 @@ __pthread_cond_timedwait:
addl $1, total_seq(%ebx)
adcl $0, total_seq+4(%ebx)
addl $1, cond_futex(%ebx)
- addl $(1 << clock_bits), cond_nwaiters(%ebx)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
-#define FRAME_SIZE 24
+#define FRAME_SIZE 32
subl $FRAME_SIZE, %esp
-.Lsubl:
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+ cfi_remember_state
/* Get and store current wakeup_seq value. */
movl wakeup_seq(%ebx), %edi
@@ -98,12 +107,14 @@ __pthread_cond_timedwait:
movl %edx, 16(%esp)
movl %eax, 20(%esp)
+ /* Reset the pi-requeued flag. */
+8: movl $0, 24(%esp)
/* Get the current time. */
-8: movl %ebx, %edx
+ movl %ebx, %edx
#ifdef __NR_clock_gettime
/* Get the clock number. */
movl cond_nwaiters(%ebx), %ebx
- andl $((1 << clock_bits) - 1), %ebx
+ andl $((1 << nwaiters_shift) - 1), %ebx
/* Only clocks 0 and 1 are allowed so far. Both are handled in the
kernel. */
leal 4(%esp), %ecx
@@ -124,7 +135,7 @@ __pthread_cond_timedwait:
/* Get the current time. */
leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -149,6 +160,7 @@ __pthread_cond_timedwait:
movl %edx, 8(%esp)
movl cond_futex(%ebx), %edi
+ movl %edi, 28(%esp)
/* Unlock. */
LOCK
@@ -163,9 +175,60 @@ __pthread_cond_timedwait:
4: call __pthread_enable_asynccancel
movl %eax, (%esp)
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ sete %cl
+ je 40f
+
+ movl dep_mutex(%ebx), %edi
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 40f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ /* The following only works like this because we only support
+ two clocks, represented using a single bit. */
+ testl $1, cond_nwaiters(%ebx)
+ /* XXX Need to implement using sete instead of a jump. */
+ jne 42f
+ orl $FUTEX_CLOCK_REALTIME, %ecx
+
+ /* Requeue-PI uses absolute timeout */
+42: leal (%ebp), %esi
+ movl 28(%esp), %edx
+ addl $cond_futex, %ebx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ subl $cond_futex, %ebx
+ movl %eax, %esi
+ /* Set the pi-requeued flag only if the kernel has returned 0. The
+ kernel does not hold the mutex on ETIMEDOUT or any other error. */
+ cmpl $0, %eax
+ sete 24(%esp)
+ je 41f
+
+ /* Normal and PI futexes don't mix. Use normal futex functions only
+ if the kernel does not support the PI futex functions. */
+ cmpl $-ENOSYS, %eax
+ jne 41f
+ xorl %ecx, %ecx
+
+40: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+ addl $FUTEX_WAIT, %ecx
+#endif
leal 4(%esp), %esi
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %edi, %edx
+ movl 28(%esp), %edx
addl $cond_futex, %ebx
.Ladd_cond_futex:
movl $SYS_futex, %eax
@@ -174,7 +237,7 @@ __pthread_cond_timedwait:
.Lsub_cond_futex:
movl %eax, %esi
- movl (%esp), %eax
+41: movl (%esp), %eax
call __pthread_disable_asynccancel
.LcleanupEND:
@@ -225,7 +288,7 @@ __pthread_cond_timedwait:
14: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-24: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+24: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
movl total_seq(%ebx), %eax
@@ -233,12 +296,23 @@ __pthread_cond_timedwait:
cmpl $0xffffffff, %eax
jne 25f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 25f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -251,11 +325,15 @@ __pthread_cond_timedwait:
#endif
jne 10f
- /* Remove cancellation handler. */
11: movl 24+FRAME_SIZE(%esp), %eax
+ /* With requeue_pi, the mutex lock is held in the kernel. */
+ movl 24(%esp), %ecx
+ testl %ecx, %ecx
+ jnz 27f
+
call __pthread_mutex_cond_lock
- addl $FRAME_SIZE, %esp
-.Laddl:
+26: addl $FRAME_SIZE, %esp
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* We return the result of the mutex_lock operation if it failed. */
testl %eax, %eax
@@ -268,46 +346,118 @@ __pthread_cond_timedwait:
#endif
18: popl %ebx
-.Lpop_ebx:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
-.Lpop_esi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %edi
-.Lpop_edi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %ebp
-.Lpop_ebp:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
ret
+ cfi_restore_state
+
+27: call __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 26b
+
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* Initial locking failed. */
1:
-.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
+ /* The initial unlocking of the mutex failed. */
+16:
+ LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 18b
+
+ movl %eax, %esi
+#if cond_lock == 0
+ movl %ebx, %eax
+#else
+ leal cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+
+ movl %esi, %eax
+ jmp 18b
+
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
/* Unlock in loop requires wakeup. */
3:
-.LSbl2:
#if cond_lock == 0
movl %ebx, %eax
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -317,37 +467,24 @@ __pthread_cond_timedwait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
- jmp 11b
-
- /* The initial unlocking of the mutex failed. */
-16:
-.LSbl3:
- LOCK
-#if cond_lock == 0
- subl $1, (%ebx)
-#else
- subl $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- jne 18b
-
- movl %eax, %esi
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_unlock_wake
-
- movl %esi, %eax
- jmp 18b
+ call __lll_unlock_wake
+ jmp 11b
#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
/* clock_gettime not available. */
-.LSbl4:
19: leal 4(%esp), %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
movl %edx, %ebx
@@ -374,7 +511,6 @@ weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
.type __condvar_tw_cleanup2, @function
__condvar_tw_cleanup2:
subl $cond_futex, %ebx
-.LSbl5:
.size __condvar_tw_cleanup2, .-__condvar_tw_cleanup2
.type __condvar_tw_cleanup, @function
__condvar_tw_cleanup:
@@ -392,25 +528,45 @@ __condvar_tw_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 20(%esp), %eax
jne 3f
- addl $1, wakeup_seq(%ebx)
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movl total_seq(%ebx), %eax
+ movl total_seq+4(%ebx), %edi
+ cmpl wakeup_seq+4(%ebx), %edi
+ jb 6f
+ ja 7f
+ cmpl wakeup_seq(%ebx), %eax
+ jbe 7f
+
+6: addl $1, wakeup_seq(%ebx)
adcl $0, wakeup_seq+4(%ebx)
-
addl $1, cond_futex(%ebx)
- addl $1, woken_seq(%ebx)
+7: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-3: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
xorl %edi, %edi
@@ -419,12 +575,23 @@ __condvar_tw_cleanup:
cmpl $0xffffffff, %eax
jne 4f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 4f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -443,13 +610,34 @@ __condvar_tw_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
jnz 5f
addl $cond_futex, %ebx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
movl $0x7fffffff, %edx
ENTER_KERNEL
@@ -462,4 +650,44 @@ __condvar_tw_cleanup:
call _Unwind_Resume
hlt
.LENDCODE:
+ cfi_endproc
.size __condvar_tw_cleanup, .-__condvar_tw_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format (omit)
+ .byte DW_EH_PE_omit # @TType format (omit)
+ .byte DW_EH_PE_sdata4 # call-site format
+ # DW_EH_PE_sdata4
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .long .LcleanupSTART-.LSTARTCODE
+ .long .Ladd_cond_futex-.LcleanupSTART
+ .long __condvar_tw_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .Ladd_cond_futex-.LSTARTCODE
+ .long .Lsub_cond_futex-.Ladd_cond_futex
+ .long __condvar_tw_cleanup2-.LSTARTCODE
+ .uleb128 0
+ .long .Lsub_cond_futex-.LSTARTCODE
+ .long .LcleanupEND-.Lsub_cond_futex
+ .long __condvar_tw_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .LcallUR-.LSTARTCODE
+ .long .LENDCODE-.LcallUR
+ .long 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
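
The largest addition above is the requeue-PI branch: when the associated mutex is a priority-inheritance mutex without the robust bit, the wait is done with FUTEX_WAIT_REQUEUE_PI (absolute timeout, with FUTEX_CLOCK_REALTIME ORed in for the realtime clock), and the flag stored at 24(%esp) records whether the kernel returned 0 and therefore already holds the mutex, in which case __pthread_mutex_cond_lock_adjust is called instead of a full lock. A simplified C sketch of that decision; ROBUST_BIT/PI_BIT values are placeholders, not taken from this patch:

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#define ROBUST_BIT 0x100   /* placeholder for the internal mutex-kind bits */
#define PI_BIT     0x200   /* placeholder */

/* Returns 1 when the kernel already acquired the mutex for us (the
   "pi-requeued" flag), 0 when the caller must fall back to FUTEX_WAIT and a
   normal __pthread_mutex_cond_lock afterwards.  */
static int try_wait_requeue_pi(int *cond_futex, unsigned int futex_val,
                               int *mutex_futex, int mutex_kind,
                               const struct timespec *abstime)
{
    if ((mutex_kind & (ROBUST_BIT | PI_BIT)) != PI_BIT)
        return 0;                          /* not a plain PI mutex */

    long r = syscall(SYS_futex, cond_futex,
                     FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG
                         | FUTEX_CLOCK_REALTIME,   /* realtime clock case */
                     futex_val, abstime /* absolute, not relative */,
                     mutex_futex, 0);
    if (r == 0)
        return 1;                          /* woken and requeued: mutex held */
    /* On ENOSYS the kernel lacks PI futexes; on ETIMEDOUT and other errors
       the mutex is not held.  Normal and PI futexes must not be mixed.  */
    return 0;
}
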
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
index 377a7340f..ab4ef0a45 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004,2006-2007,2009,2010 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <pthread-errnos.h>
+#include <pthread-pi-defines.h>
+#include <bits/kernel-features.h>
.text
@@ -39,16 +34,31 @@
.align 16
__pthread_cond_wait:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+ pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebp, 0)
pushl %edi
-.Lpush_edi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%edi, 0)
pushl %esi
-.Lpush_esi:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%esi, 0)
pushl %ebx
-.Lpush_ebx:
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset(%ebx, 0)
xorl %esi, %esi
- movl 16(%esp), %ebx
+ movl 20(%esp), %ebx
/* Get internal lock. */
movl $1, %edx
@@ -64,7 +74,7 @@ __pthread_cond_wait:
/* Store the reference to the mutex. If there is already a
different value in there this is a bad user bug. */
2: cmpl $-1, dep_mutex(%ebx)
- movl 20(%esp), %eax
+ movl 24(%esp), %eax
je 15f
movl %eax, dep_mutex(%ebx)
@@ -78,11 +88,12 @@ __pthread_cond_wait:
addl $1, total_seq(%ebx)
adcl $0, total_seq+4(%ebx)
addl $1, cond_futex(%ebx)
- addl $(1 << clock_bits), cond_nwaiters(%ebx)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
-#define FRAME_SIZE 16
+#define FRAME_SIZE 20
subl $FRAME_SIZE, %esp
-.Lsubl:
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+ cfi_remember_state
/* Get and store current wakeup_seq value. */
movl wakeup_seq(%ebx), %edi
@@ -92,7 +103,9 @@ __pthread_cond_wait:
movl %edx, 8(%esp)
movl %eax, 12(%esp)
-8: movl cond_futex(%ebx), %edi
+ /* Reset the pi-requeued flag. */
+8: movl $0, 16(%esp)
+ movl cond_futex(%ebx), %ebp
/* Unlock. */
LOCK
@@ -107,8 +120,48 @@ __pthread_cond_wait:
4: call __pthread_enable_asynccancel
movl %eax, (%esp)
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
- movl %edi, %edx
+ xorl %ecx, %ecx
+ cmpl $-1, dep_mutex(%ebx)
+ sete %cl
+ je 18f
+
+ movl dep_mutex(%ebx), %edi
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%edi), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 18f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
+ movl %ebp, %edx
+ xorl %esi, %esi
+ addl $cond_futex, %ebx
+ movl $SYS_futex, %eax
+ ENTER_KERNEL
+ subl $cond_futex, %ebx
+ /* Set the pi-requeued flag only if the kernel has returned 0. The
+ kernel does not hold the mutex on error. */
+ cmpl $0, %eax
+ sete 16(%esp)
+ je 19f
+
+ /* Normal and PI futexes don't mix. Use normal futex functions only
+ if the kernel does not support the PI futex functions. */
+ cmpl $-ENOSYS, %eax
+ jne 19f
+ xorl %ecx, %ecx
+
+18: subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+#if FUTEX_WAIT != 0
+ addl $FUTEX_WAIT, %ecx
+#endif
+ movl %ebp, %edx
addl $cond_futex, %ebx
.Ladd_cond_futex:
movl $SYS_futex, %eax
@@ -116,7 +169,7 @@ __pthread_cond_wait:
subl $cond_futex, %ebx
.Lsub_cond_futex:
- movl (%esp), %eax
+19: movl (%esp), %eax
call __pthread_disable_asynccancel
.LcleanupEND:
@@ -155,7 +208,7 @@ __pthread_cond_wait:
adcl $0, woken_seq+4(%ebx)
/* Unlock */
-16: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+16: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
movl total_seq(%ebx), %eax
@@ -163,12 +216,23 @@ __pthread_cond_wait:
cmpl $0xffffffff, %eax
jne 17f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 17f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -181,51 +245,130 @@ __pthread_cond_wait:
#endif
jne 10f
-11: movl 20+FRAME_SIZE(%esp), %eax
+ /* With requeue_pi, the mutex lock is held in the kernel. */
+11: movl 24+FRAME_SIZE(%esp), %eax
+ movl 16(%esp), %ecx
+ testl %ecx, %ecx
+ jnz 21f
+
call __pthread_mutex_cond_lock
- addl $FRAME_SIZE, %esp
-.Laddl:
+20: addl $FRAME_SIZE, %esp
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
14: popl %ebx
-.Lpop_ebx:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
-.Lpop_esi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
popl %edi
-.Lpop_edi:
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
+ popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
/* We return the result of the mutex_lock operation. */
ret
+ cfi_restore_state
+
+21: call __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 20b
+
+ cfi_adjust_cfa_offset(-FRAME_SIZE);
/* Initial locking failed. */
1:
-.LSbl1:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- call __lll_mutex_lock_wait
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_lock_wait
jmp 2b
- /* Unlock in loop requires waekup. */
+ /* The initial unlocking of the mutex failed. */
+12:
+ LOCK
+#if cond_lock == 0
+ subl $1, (%ebx)
+#else
+ subl $1, cond_lock(%ebx)
+#endif
+ jne 14b
+
+ movl %eax, %esi
+#if cond_lock == 0
+ movl %ebx, %eax
+#else
+ leal cond_lock(%ebx), %eax
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
+
+ movl %esi, %eax
+ jmp 14b
+
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
+ /* Unlock in loop requires wakeup. */
3:
-.LSbl2:
#if cond_lock == 0
movl %ebx, %eax
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
jmp 4b
/* Locking in loop failed. */
5:
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
jmp 6b
/* Unlock after loop requires wakeup. */
@@ -235,30 +378,18 @@ __pthread_cond_wait:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
- jmp 11b
-
- /* The initial unlocking of the mutex failed. */
-12:
-.LSbl3:
- LOCK
-#if cond_lock == 0
- subl $1, (%ebx)
-#else
- subl $1, cond_lock(%ebx)
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
#endif
- jne 14b
-
- movl %eax, %esi
-#if cond_lock == 0
- movl %ebx, %eax
-#else
- leal cond_lock(%ebx), %eax
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_unlock_wake
-
- movl %esi, %eax
- jmp 14b
+ call __lll_unlock_wake
+ jmp 11b
.size __pthread_cond_wait, .-__pthread_cond_wait
weak_alias(__pthread_cond_wait, pthread_cond_wait)
@@ -284,25 +415,45 @@ __condvar_w_cleanup:
jz 1f
#if cond_lock == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal cond_lock(%ebx), %ecx
+ leal cond_lock(%ebx), %edx
+#endif
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
#endif
- call __lll_mutex_lock_wait
+ call __lll_lock_wait
1: movl broadcast_seq(%ebx), %eax
cmpl 12(%esp), %eax
jne 3f
- addl $1, wakeup_seq(%ebx)
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movl total_seq(%ebx), %eax
+ movl total_seq+4(%ebx), %edi
+ cmpl wakeup_seq+4(%ebx), %edi
+ jb 6f
+ ja 7f
+ cmpl wakeup_seq(%ebx), %eax
+ jbe 7f
+
+6: addl $1, wakeup_seq(%ebx)
adcl $0, wakeup_seq+4(%ebx)
-
addl $1, cond_futex(%ebx)
- addl $1, woken_seq(%ebx)
+7: addl $1, woken_seq(%ebx)
adcl $0, woken_seq+4(%ebx)
-3: subl $(1 << clock_bits), cond_nwaiters(%ebx)
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx)
/* Wake up a thread which wants to destroy the condvar object. */
xorl %edi, %edi
@@ -311,12 +462,23 @@ __condvar_w_cleanup:
cmpl $0xffffffff, %eax
jne 4f
movl cond_nwaiters(%ebx), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 4f
addl $cond_nwaiters, %ebx
movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_nwaiters(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $1, %edx
ENTER_KERNEL
subl $cond_nwaiters, %ebx
@@ -335,18 +497,39 @@ __condvar_w_cleanup:
#else
leal cond_lock(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+#if (LLL_SHARED-LLL_PRIVATE) > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex(%ebx)
+ setne %cl
+ subl $1, %ecx
+ andl $(LLL_SHARED-LLL_PRIVATE), %ecx
+#if LLL_PRIVATE != 0
+ addl $LLL_PRIVATE, %ecx
+#endif
+ call __lll_unlock_wake
/* Wake up all waiters to make sure no signal gets lost. */
2: testl %edi, %edi
jnz 5f
addl $cond_futex, %ebx
- movl $FUTEX_WAKE, %ecx
+#if FUTEX_PRIVATE_FLAG > 255
+ xorl %ecx, %ecx
+#endif
+ cmpl $-1, dep_mutex-cond_futex(%ebx)
+ sete %cl
+ subl $1, %ecx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %ecx
+#else
+ andl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $FUTEX_WAKE, %ecx
movl $SYS_futex, %eax
movl $0x7fffffff, %edx
ENTER_KERNEL
-5: movl 20+FRAME_SIZE(%esp), %eax
+5: movl 24+FRAME_SIZE(%esp), %eax
call __pthread_mutex_cond_lock
movl %esi, (%esp)
@@ -354,4 +537,54 @@ __condvar_w_cleanup:
call _Unwind_Resume
hlt
.LENDCODE:
+ cfi_endproc
.size __condvar_w_cleanup, .-__condvar_w_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format (omit)
+ .byte DW_EH_PE_omit # @TType format (omit)
+ .byte DW_EH_PE_sdata4 # call-site format
+ # DW_EH_PE_sdata4
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .long .LcleanupSTART-.LSTARTCODE
+ .long .Ladd_cond_futex-.LcleanupSTART
+ .long __condvar_w_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .Ladd_cond_futex-.LSTARTCODE
+ .long .Lsub_cond_futex-.Ladd_cond_futex
+ .long __condvar_w_cleanup2-.LSTARTCODE
+ .uleb128 0
+ .long .Lsub_cond_futex-.LSTARTCODE
+ .long .LcleanupEND-.Lsub_cond_futex
+ .long __condvar_w_cleanup-.LSTARTCODE
+ .uleb128 0
+ .long .LcallUR-.LSTARTCODE
+ .long .LENDCODE-.LcallUR
+ .long 0
+ .uleb128 0
+.Lcstend:
+
+#ifdef PIC
+ .section .gnu.linkonce.t.__i686.get_pc_thunk.cx,"ax",@progbits
+ .globl __i686.get_pc_thunk.cx
+ .hidden __i686.get_pc_thunk.cx
+ .type __i686.get_pc_thunk.cx,@function
+__i686.get_pc_thunk.cx:
+ movl (%esp), %ecx;
+ ret
+ .size __i686.get_pc_thunk.cx,.-__i686.get_pc_thunk.cx
+#endif
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
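
The clock_bits to nwaiters_shift renaming in both wait functions reflects how the __nwaiters field is packed: the low bits carry the clock ID used for the timeout, the remaining bits the waiter count, which is why waiters are added and removed in units of (1 << nwaiters_shift) and the "any waiters left?" test masks the low bits off. For reference, with a shift value that is an assumption, not taken from this patch:

#define COND_NWAITERS_SHIFT 1   /* assumed width of the clock-ID field */

static unsigned int nwaiters_count(unsigned int nwaiters)
{
    return nwaiters >> COND_NWAITERS_SHIFT;              /* waiter count */
}

static unsigned int nwaiters_clock_id(unsigned int nwaiters)
{
    return nwaiters & ((1u << COND_NWAITERS_SHIFT) - 1); /* CLOCK_* index */
}
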
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
index aec79f07e..d181393e6 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_rdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,8 +30,13 @@
.type __pthread_rwlock_rdlock,@function
.align 16
__pthread_rwlock_rdlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
xorl %esi, %esi
movl 12(%esp), %ebx
@@ -61,7 +57,7 @@ __pthread_rwlock_rdlock:
jne 14f
cmpl $0, WRITERS_QUEUED(%ebx)
je 5f
- cmpl $0, FLAGS(%ebx)
+ cmpb $0, FLAGS(%ebx)
je 5f
3: addl $1, READERS_QUEUED(%ebx)
@@ -77,8 +73,18 @@ __pthread_rwlock_rdlock:
#endif
jne 10f
-11: addl $READERS_WAKEUP, %ebx
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebx), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $READERS_WAKEUP, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -98,7 +104,7 @@ __pthread_rwlock_rdlock:
13: subl $1, READERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebx)
je 8f
9: LOCK
@@ -110,24 +116,32 @@ __pthread_rwlock_rdlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
/* Deadlock detected. */
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -136,17 +150,18 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -155,17 +170,20 @@ __pthread_rwlock_rdlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
.globl pthread_rwlock_rdlock
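
All of the rwlock functions in this and the following files switch to the same scheme: the one-byte PSHARED field is loaded with movzbl, combined with FUTEX_WAIT/FUTEX_WAKE so that private locks get FUTEX_PRIVATE_FLAG and process-shared locks do not, and passed in %ecx as the new second argument of __lll_lock_wait/__lll_unlock_wake. Assuming the initialisation code stores 0 in PSHARED for private locks and FUTEX_PRIVATE_FLAG for shared ones (an assumption; only the use site is visible here), the __ASSUME_PRIVATE_FUTEX case reduces to a single XOR:

#include <linux/futex.h>

static int rwlock_futex_wait_op(unsigned char pshared)
{
    /* private lock:  0   ^ (FUTEX_PRIVATE_FLAG|FUTEX_WAIT) -> private wait
       shared  lock:  128 ^ (FUTEX_PRIVATE_FLAG|FUTEX_WAIT) -> plain wait   */
    return pshared ^ (FUTEX_PRIVATE_FLAG | FUTEX_WAIT);
}
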
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
index 3717d7ef5..1ffdf33fe 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedrdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,11 +30,21 @@
.type pthread_rwlock_timedrdlock,@function
.align 16
pthread_rwlock_timedrdlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl 28(%esp), %ebp
movl 32(%esp), %edi
@@ -64,7 +65,7 @@ pthread_rwlock_timedrdlock:
jne 14f
cmpl $0, WRITERS_QUEUED(%ebp)
je 5f
- cmpl $0, FLAGS(%ebp)
+ cmpb $0, FLAGS(%ebp)
je 5f
/* Check the value of the timeout parameter. */
@@ -87,7 +88,7 @@ pthread_rwlock_timedrdlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -107,13 +108,23 @@ pthread_rwlock_timedrdlock:
/* Futex call. */
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
+
movl %esi, %edx
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebp), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl %esp, %esi
leal READERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
+ movl %eax, %esi
17:
/* Reget the lock. */
@@ -128,14 +139,14 @@ pthread_rwlock_timedrdlock:
jnz 12f
13: subl $1, READERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %ecx
+ cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
addl $1, NR_READERS(%ebp)
je 8f
9: LOCK
@@ -146,27 +157,42 @@ pthread_rwlock_timedrdlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
+ cfi_adjust_cfa_offset(-8)
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(24)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -175,17 +201,18 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
8: subl $1, NR_READERS(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
/* Overflow. */
4: subl $1, READERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -194,21 +221,24 @@ pthread_rwlock_timedrdlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %ecx
+16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
index 09c9e30ca..5826f02e6 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_timedwrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,11 +30,21 @@
.type pthread_rwlock_timedwrlock,@function
.align 16
pthread_rwlock_timedwrlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %ebp
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
subl $8, %esp
+ cfi_adjust_cfa_offset(8)
movl 28(%esp), %ebp
movl 32(%esp), %edi
@@ -85,7 +86,7 @@ pthread_rwlock_timedwrlock:
/* Get current time. */
11: movl %esp, %ebx
xorl %ecx, %ecx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -105,13 +106,23 @@ pthread_rwlock_timedwrlock:
/* Futex call. */
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
+
movl %esi, %edx
- xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebp), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebp), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl %esp, %esi
leal WRITERS_WAKEUP(%ebp), %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
- movl %eax, %ecx
+ movl %eax, %esi
17:
/* Reget the lock. */
@@ -126,14 +137,14 @@ pthread_rwlock_timedwrlock:
jnz 12f
13: subl $1, WRITERS_QUEUED(%ebp)
- cmpl $-ETIMEDOUT, %ecx
+ cmpl $-ETIMEDOUT, %esi
jne 2b
-18: movl $ETIMEDOUT, %ecx
+18: movl $ETIMEDOUT, %edx
jmp 9f
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebp)
9: LOCK
@@ -144,27 +155,42 @@ pthread_rwlock_timedwrlock:
#endif
jne 6f
-7: movl %ecx, %eax
+7: movl %edx, %eax
addl $8, %esp
+ cfi_adjust_cfa_offset(-8)
popl %ebp
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebp)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(24)
+ cfi_offset(%esi, -8)
+ cfi_offset(%edi, -12)
+ cfi_offset(%ebx, -16)
+ cfi_offset(%ebp, -20)
1:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID, %eax
jne 3b
-20: movl $EDEADLK, %ecx
+20: movl $EDEADLK, %edx
jmp 9b
6:
@@ -173,12 +199,13 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 7b
/* Overflow. */
4: subl $1, WRITERS_QUEUED(%ebp)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -187,21 +214,24 @@ pthread_rwlock_timedwrlock:
#else
leal MUTEX(%ebp), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebp, %ecx
+ movl %ebp, %edx
#else
- leal MUTEX(%ebp), %ecx
+ leal MUTEX(%ebp), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebp), %ecx
+ call __lll_lock_wait
jmp 13b
-16: movl $-ETIMEDOUT, %ecx
+16: movl $-ETIMEDOUT, %esi
jmp 17b
-19: movl $EINVAL, %ecx
+19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
index 597c82fa8..0130261c7 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_unlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -37,8 +29,13 @@
.type __pthread_rwlock_unlock,@function
.align 16
__pthread_rwlock_unlock:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%edi, -12)
movl 12(%esp), %edi
@@ -60,9 +57,8 @@ __pthread_rwlock_unlock:
5: movl $0, WRITER(%edi)
- movl $1, %ecx
+ movl $1, %edx
leal WRITERS_WAKEUP(%edi), %ebx
- movl %ecx, %edx
cmpl $0, WRITERS_QUEUED(%edi)
jne 0f
@@ -82,14 +78,30 @@ __pthread_rwlock_unlock:
#endif
jne 7f
-8: movl $SYS_futex, %eax
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%edi), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %ecx
+#else
+ movzbl PSHARED(%edi), %ecx
+ orl $FUTEX_WAKE, %ecx
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ movl $SYS_futex, %eax
ENTER_KERNEL
xorl %eax, %eax
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%edi)
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%ebx, -8)
+ cfi_offset(%edi, -12)
.align 16
6: LOCK
#if MUTEX == 0
@@ -106,31 +118,34 @@ __pthread_rwlock_unlock:
1:
#if MUTEX == 0
- movl %edi, %ecx
+ movl %edi, %edx
#else
- leal MUTEX(%edx), %ecx
+ leal MUTEX(%edi), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%edi), %ecx
+ call __lll_lock_wait
jmp 2b
3:
#if MUTEX == 0
movl %edi, %eax
#else
- leal MUTEX(%edx), %eax
+ leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 4b
7:
#if MUTEX == 0
movl %edi, %eax
#else
- leal MUTEX(%edx), %eax
+ leal MUTEX(%edi), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%edi), %ecx
+ call __lll_unlock_wake
jmp 8b
-
+ cfi_endproc
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
index bb384a267..f69c49b15 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/pthread_rwlock_wrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,18 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -38,8 +30,13 @@
.type __pthread_rwlock_wrlock,@function
.align 16
__pthread_rwlock_wrlock:
+ cfi_startproc
pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
xorl %esi, %esi
movl 12(%esp), %ebx
@@ -74,8 +71,18 @@ __pthread_rwlock_wrlock:
#endif
jne 10f
-11: addl $WRITERS_WAKEUP, %ebx
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movzbl PSHARED(%ebx), %ecx
+ xorl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %ecx
+#else
+ movzbl PSHARED(%ebx), %ecx
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %ecx
+# endif
+ xorl %gs:PRIVATE_FUTEX, %ecx
+#endif
+ addl $WRITERS_WAKEUP, %ebx
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -95,7 +102,7 @@ __pthread_rwlock_wrlock:
13: subl $1, WRITERS_QUEUED(%ebx)
jmp 2b
-5: xorl %ecx, %ecx
+5: xorl %edx, %edx
movl %gs:TID, %eax
movl %eax, WRITER(%ebx)
9: LOCK
@@ -107,23 +114,31 @@ __pthread_rwlock_wrlock:
jne 6f
7:
- movl %ecx, %eax
+ movl %edx, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
ret
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%esi, -8)
+ cfi_offset(%ebx, -12)
1:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 2b
14: cmpl %gs:TID , %eax
jne 3b
- movl $EDEADLK, %ecx
+ movl $EDEADLK, %edx
jmp 9b
6:
@@ -132,11 +147,12 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 7b
4: subl $1, WRITERS_QUEUED(%ebx)
- movl $EAGAIN, %ecx
+ movl $EAGAIN, %edx
jmp 9b
10:
@@ -145,17 +161,20 @@ __pthread_rwlock_wrlock:
#else
leal MUTEX(%ebx), %eax
#endif
- call __lll_mutex_unlock_wake
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_unlock_wake
jmp 11b
12:
#if MUTEX == 0
- movl %ebx, %ecx
+ movl %ebx, %edx
#else
- leal MUTEX(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
#endif
- call __lll_mutex_lock_wait
+ movzbl PSHARED(%ebx), %ecx
+ call __lll_lock_wait
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
.globl pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
index a0dc39c8f..b077a20ca 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_post.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,15 +19,8 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE 1
+#include <structsem.h>
+#include <lowlevellock.h>
.text
@@ -36,25 +29,49 @@
.type __new_sem_post,@function
.align 16
__new_sem_post:
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
movl 8(%esp), %ebx
- movl $1, %edx
+
+#if VALUE == 0
+ movl (%ebx), %eax
+#else
+ movl VALUE(%ebx), %eax
+#endif
+0: cmpl $SEM_VALUE_MAX, %eax
+ je 3f
+ leal 1(%eax), %edx
LOCK
- xaddl %edx, (%ebx)
+#if VALUE == 0
+ cmpxchgl %edx, (%ebx)
+#else
+ cmpxchgl %edx, VALUE(%ebx)
+#endif
+ jnz 0b
+
+ cmpl $0, NWAITERS(%ebx)
+ je 2f
- movl $SYS_futex, %eax
movl $FUTEX_WAKE, %ecx
- addl $1, %edx
+ orl PRIVATE(%ebx), %ecx
+ movl $1, %edx
+ movl $SYS_futex, %eax
ENTER_KERNEL
testl %eax, %eax
js 1f
- xorl %eax, %eax
+2: xorl %eax, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
1:
#ifdef __PIC__
call __x86.get_pc_thunk.bx
@@ -80,6 +97,35 @@ __new_sem_post:
orl $-1, %eax
popl %ebx
ret
+
+3:
+#ifdef __PIC__
+ call __x86.get_pc_thunk.bx
+#else
+ movl $5f, %ebx
+5:
+#endif
+ addl $_GLOBAL_OFFSET_TABLE_, %ebx
+#if USE___THREAD
+# ifdef NO_TLS_DIRECT_SEG_REFS
+ movl errno@gotntpoff(%ebx), %edx
+ addl %gs:0, %edx
+ movl $EOVERFLOW, (%edx)
+# else
+ movl errno@gotntpoff(%ebx), %edx
+ movl $EOVERFLOW, %gs:(%edx)
+# endif
+#else
+ call __errno_location@plt
+ movl $EOVERFLOW, (%eax)
+#endif
+
+ orl $-1, %eax
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
+ ret
+ cfi_endproc
.size __new_sem_post,.-__new_sem_post
weak_alias(__new_sem_post, sem_post)
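
The rewritten sem_post above replaces the unconditional xaddl with a compare-and-swap loop so it can reject posts at SEM_VALUE_MAX with EOVERFLOW, and it only enters the kernel when the semaphore's waiter count is non-zero. A C outline of the same flow; the struct layout is an assumption for illustration, the real one comes from structsem.h:

#include <errno.h>
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SEM_VALUE_MAX
# define SEM_VALUE_MAX 2147483647
#endif

struct new_sem_sketch { unsigned int value; int private; unsigned long nwaiters; };

static int sem_post_sketch(struct new_sem_sketch *isem)
{
    unsigned int v = __atomic_load_n(&isem->value, __ATOMIC_RELAXED);
    do {
        if (v == SEM_VALUE_MAX) {
            errno = EOVERFLOW;          /* label 3 in the assembly */
            return -1;
        }
    } while (!__atomic_compare_exchange_n(&isem->value, &v, v + 1, 0,
                                          __ATOMIC_RELEASE, __ATOMIC_RELAXED));

    if (isem->nwaiters != 0)            /* only wake when someone is waiting */
        syscall(SYS_futex, &isem->value, FUTEX_WAKE | isem->private, 1);
    return 0;
}
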
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
index 972b49fac..218b12f9c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,15 +19,13 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
@@ -35,55 +33,46 @@
.globl sem_timedwait
.type sem_timedwait,@function
.align 16
- cfi_startproc
sem_timedwait:
- /* First check for cancellation. */
- movl %gs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 10f
-
+.LSTARTCODE:
movl 4(%esp), %ecx
movl (%ecx), %eax
2: testl %eax, %eax
- je,pn 1f
+ je 1f
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ecx)
- jne,pn 2b
+ jne 2b
xorl %eax, %eax
ret
/* Check whether the timeout value is valid. */
1: pushl %esi
- cfi_adjust_cfa_offset(4)
+.Lpush_esi:
pushl %edi
- cfi_adjust_cfa_offset(4)
+.Lpush_edi:
pushl %ebx
- cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
subl $12, %esp
- cfi_adjust_cfa_offset(12)
+.Lsub_esp:
movl 32(%esp), %edi
- cfi_offset(7, -12) /* %edi */
/* Check for invalid nanosecond field. */
cmpl $1000000000, 4(%edi)
movl $EINVAL, %esi
- cfi_offset(6, -8) /* %esi */
jae 6f
- cfi_offset(3, -16) /* %ebx */
-7: call __pthread_enable_asynccancel
- movl %eax, 8(%esp)
+ LOCK
+ incl NWAITERS(%ecx)
- xorl %ecx, %ecx
+7: xorl %ecx, %ecx
movl %esp, %ebx
movl %ecx, %edx
- movl $SYS_gettimeofday, %eax
+ movl $__NR_gettimeofday, %eax
ENTER_KERNEL
/* Compute relative timeout. */
@@ -103,19 +92,30 @@ sem_timedwait:
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
- movl 28(%esp), %ebx
- xorl %ecx, %ecx
+
+.LcleanupSTART:
+ call __pthread_enable_asynccancel
+ movl %eax, 8(%esp)
+
+ movl 28(%esp), %ebx /* Load semaphore address. */
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
movl %esp, %esi
- movl $SYS_futex, %eax
xorl %edx, %edx
+ movl $SYS_futex, %eax
ENTER_KERNEL
movl %eax, %esi
movl 8(%esp), %eax
call __pthread_disable_asynccancel
+.LcleanupEND:
testl %esi, %esi
- je,pt 9f
+ je 9f
cmpl $-EWOULDBLOCK, %esi
jne 3f
@@ -126,29 +126,27 @@ sem_timedwait:
leal -1(%eax), %ecx
LOCK
cmpxchgl %ecx, (%ebx)
- jne,pn 8b
+ jne 8b
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
xorl %eax, %eax
+
+ LOCK
+ decl NWAITERS(%ebx)
+
+10: addl $12, %esp
+.Ladd_esp:
popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(3)
+.Lpop_ebx:
popl %edi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(7)
+.Lpop_edi:
popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(6)
+.Lpop_esi:
ret
- cfi_adjust_cfa_offset(24)
- cfi_offset(6, -8) /* %esi */
- cfi_offset(7, -12) /* %edi */
- cfi_offset(3, -16) /* %ebx */
+.Lafter_ret:
3: negl %esi
6:
-#ifdef __PIC__
+#ifdef PIC
call __x86.get_pc_thunk.bx
#else
movl $4f, %ebx
@@ -169,25 +167,163 @@ sem_timedwait:
movl %esi, (%eax)
#endif
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
+ movl 28(%esp), %ebx /* Load semaphore address. */
orl $-1, %eax
- popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(3)
- popl %edi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(7)
- popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(6)
- ret
+ jmp 10b
+ .size sem_timedwait,.-sem_timedwait
+
-10: /* Canceled. */
- movl $0xffffffff, %gs:RESULT
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
LOCK
- orl $0x10, %gs:CANCELHANDLING
- movl %gs:CLEANUP_JMP_BUF, %eax
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
- cfi_endproc
- .size sem_timedwait,.-sem_timedwait
+ decl NWAITERS(%ebx)
+ movl %eax, (%esp)
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff # @LPStart format (omit)
+ .byte 0xff # @TType format (omit)
+ .byte 0x01 # call-site format
+ # DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zPLR" # NUL-terminated augmentation
+ # string.
+#else
+ .string "zPL" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -4 # Data alignment factor.
+ .byte 8 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 7 # Augmentation value length.
+ .byte 0x9b # Personality: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4
+ # + DW_EH_PE_indirect
+ .long DW.ref.__gcc_personality_v0-.
+ .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+ .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 # Augmentation value length.
+ .byte 0x0 # Personality: absolute
+ .long __gcc_personality_v0
+ .byte 0x0 # LSDA Encoding: absolute
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 # DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 4
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code.
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 4 # Augmentation size
+#ifdef SHARED
+ .long .LexceptSTART-.
+#else
+ .long .LexceptSTART
+#endif
+
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_esi-.LSTARTCODE
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 2
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_edi-.Lpush_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x87 # DW_CFA_offset %edi
+ .uleb128 3
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_ebx-.Lpush_edi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 4
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lsub_esp-.Lpush_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 28
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Ladd_esp-.Lsub_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_ebx-.Ladd_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0xc3 # DW_CFA_restore %ebx
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_edi-.Lpop_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0xc7 # DW_CFA_restore %edi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpop_esi-.Lpop_edi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0xc6 # DW_CFA_restore %esi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lafter_ret-.Lpop_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 28
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 2
+ .byte 0x87 # DW_CFA_offset %edi
+ .uleb128 3
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 4
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
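
Note: the sem_timedwait rewrite above drops the cfi_* bookkeeping and emits its unwind data by hand: the .gcc_except_table marks the region between .LcleanupSTART and .LcleanupEND (the async-cancellable futex wait) with sem_wait_cleanup as its landing pad, and the .eh_frame FDE describes the frame at every push/pop label so the unwinder can restore registers from any point in the function. At the C level the protection amounts to the sketch below; struct new_sem is the layout introduced by internaltypes.h later in this patch, and futex_reltimed_wait () is an illustrative stand-in for the ENTER_KERNEL sequence, not a symbol in this tree.

    static void nwaiters_cleanup (void *arg)
    {
      /* Runs if the thread is cancelled while blocked in FUTEX_WAIT,
         so the waiter count cannot leak. */
      struct new_sem *isem = arg;
      __atomic_fetch_sub (&isem->nwaiters, 1, __ATOMIC_RELAXED);
    }

    static int wait_with_accounting (struct new_sem *isem,
                                     const struct timespec *rt)
    {
      int err;
      __atomic_fetch_add (&isem->nwaiters, 1, __ATOMIC_RELAXED);
      pthread_cleanup_push (nwaiters_cleanup, isem);
      /* The futex wait is the cancellation point the cleanup protects. */
      err = futex_reltimed_wait (&isem->value, 0, rt, isem->private);
      pthread_cleanup_pop (0);
      __atomic_fetch_sub (&isem->nwaiters, 1, __ATOMIC_RELAXED);
      return err;
    }
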
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
index 7db64820f..dad96858f 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_trywait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,13 +19,7 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <lowlevellock.h>
.text
@@ -42,7 +36,7 @@ __new_sem_trywait:
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ecx)
- jne,pn 2b
+ jne 2b
xorl %eax, %eax
ret
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
index c3e6cbce6..b1c32ee4d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,86 +19,98 @@
#include <sysdep.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
+#include <structsem.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
.globl __new_sem_wait
.type __new_sem_wait,@function
.align 16
- cfi_startproc
__new_sem_wait:
- /* First check for cancellation. */
- movl %gs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 5f
-
+.LSTARTCODE:
pushl %ebx
- cfi_adjust_cfa_offset(4)
+.Lpush_ebx:
pushl %esi
- cfi_adjust_cfa_offset(4)
+.Lpush_esi:
subl $4, %esp
- cfi_adjust_cfa_offset(4)
+.Lsub_esp:
movl 16(%esp), %ebx
- cfi_offset(3, -8) /* %ebx */
- cfi_offset(6, -12) /* %esi */
-3: movl (%ebx), %eax
+ movl (%ebx), %eax
2: testl %eax, %eax
- je,pn 1f
+ je 1f
leal -1(%eax), %edx
LOCK
cmpxchgl %edx, (%ebx)
- jne,pn 2b
- xorl %eax, %eax
+ jne 2b
+7: xorl %eax, %eax
- movl 4(%esp), %esi
- cfi_restore(6)
+9: movl 4(%esp), %esi
movl 8(%esp), %ebx
- cfi_restore(3)
addl $12, %esp
- cfi_adjust_cfa_offset(-12)
+.Ladd_esp:
ret
- cfi_adjust_cfa_offset(8)
- cfi_offset(3, -8) /* %ebx */
- cfi_offset(6, -12) /* %esi */
-1: call __pthread_enable_asynccancel
+.Lafter_ret:
+1: LOCK
+ incl NWAITERS(%ebx)
+
+.LcleanupSTART:
+6: call __pthread_enable_asynccancel
movl %eax, (%esp)
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
xorl %esi, %esi
+ xorl %edx, %edx
movl $SYS_futex, %eax
- movl %esi, %ecx
- movl %esi, %edx
ENTER_KERNEL
movl %eax, %esi
movl (%esp), %eax
call __pthread_disable_asynccancel
+.LcleanupEND:
testl %esi, %esi
- je 3b
+ je 3f
cmpl $-EWOULDBLOCK, %esi
- je 3b
+ jne 4f
+
+3:
+ movl (%ebx), %eax
+5: testl %eax, %eax
+ je 6b
+
+ leal -1(%eax), %edx
+ LOCK
+ cmpxchgl %edx, (%ebx)
+ jne 5b
+
+ LOCK
+ decl NWAITERS(%ebx)
+ jmp 7b
+
+4: LOCK
+ decl NWAITERS(%ebx)
+
negl %esi
#ifdef __PIC__
call __x86.get_pc_thunk.bx
#else
- movl $4f, %ebx
-4:
+ movl $8f, %ebx
+8:
#endif
addl $_GLOBAL_OFFSET_TABLE_, %ebx
#if USE___THREAD
@@ -115,20 +127,143 @@ __new_sem_wait:
movl %esi, (%eax)
#endif
orl $-1, %eax
- movl 4(%esp), %esi
- cfi_restore(6)
- movl 8(%esp), %ebx
- cfi_restore(3)
- addl $12, %esp
- cfi_adjust_cfa_offset(-12)
- ret
-5: /* Canceled. */
- movl $0xffffffff, %gs:RESULT
- LOCK
- orl $0x10, %gs:CANCELHANDLING
- movl %gs:CLEANUP_JMP_BUF, %eax
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
- cfi_endproc
+ jmp 9b
.size __new_sem_wait,.-__new_sem_wait
weak_alias(__new_sem_wait, sem_wait)
+
+
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ LOCK
+ decl NWAITERS(%ebx)
+ movl %eax, (%esp)
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff # @LPStart format (omit)
+ .byte 0xff # @TType format (omit)
+ .byte 0x01 # call-site format
+ # DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .long .LENDCIE-.LSTARTCIE # Length of the CIE.
+.LSTARTCIE:
+ .long 0 # CIE ID.
+ .byte 1 # Version number.
+#ifdef SHARED
+ .string "zPLR" # NUL-terminated augmentation
+ # string.
+#else
+ .string "zPL" # NUL-terminated augmentation
+ # string.
+#endif
+ .uleb128 1 # Code alignment factor.
+ .sleb128 -4 # Data alignment factor.
+ .byte 8 # Return address register
+ # column.
+#ifdef SHARED
+ .uleb128 7 # Augmentation value length.
+ .byte 0x9b # Personality: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4
+ # + DW_EH_PE_indirect
+ .long DW.ref.__gcc_personality_v0-.
+ .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+ .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
+ # + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 # Augmentation value length.
+ .byte 0x0 # Personality: absolute
+ .long __gcc_personality_v0
+ .byte 0x0 # LSDA Encoding: absolute
+#endif
+ .byte 0x0c # DW_CFA_def_cfa
+ .uleb128 4
+ .uleb128 4
+ .byte 0x88 # DW_CFA_offset, column 0x10
+ .uleb128 1
+ .align 4
+.LENDCIE:
+
+ .long .LENDFDE-.LSTARTFDE # Length of the FDE.
+.LSTARTFDE:
+ .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
+#ifdef SHARED
+ .long .LSTARTCODE-. # PC-relative start address
+ # of the code.
+#else
+ .long .LSTARTCODE # Start address of the code.
+#endif
+ .long .LENDCODE-.LSTARTCODE # Length of the code.
+ .uleb128 4 # Augmentation size
+#ifdef SHARED
+ .long .LexceptSTART-.
+#else
+ .long .LexceptSTART
+#endif
+
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_ebx-.LSTARTCODE
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 2
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lpush_esi-.Lpush_ebx
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 3
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lsub_esp-.Lpush_esi
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Ladd_esp-.Lsub_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0xc3 # DW_CFA_restore %ebx
+ .byte 0xc6 # DW_CFA_restore %esi
+ .byte 4 # DW_CFA_advance_loc4
+ .long .Lafter_ret-.Ladd_esp
+ .byte 14 # DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x83 # DW_CFA_offset %ebx
+ .uleb128 2
+ .byte 0x86 # DW_CFA_offset %esi
+ .uleb128 3
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
+
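Note: the new __new_sem_wait keeps the lock-free fast path (decrement the value with LOCK cmpxchg) and only registers itself in NWAITERS before blocking. The FUTEX_WAIT == 0 special case lets the code load the futex operation straight from the semaphore's PRIVATE word instead of OR-ing the constant in. A C rendering of the same control flow, assuming futex_wait () wraps the FUTEX_WAIT syscall and returns the negated errno (it is not a symbol in this tree):

    int sketch_sem_wait (struct new_sem *isem)
    {
      unsigned int val = isem->value;
      /* Fast path: grab a token without registering as a waiter. */
      while (val != 0)
        if (__atomic_compare_exchange_n (&isem->value, &val, val - 1, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
          return 0;

      __atomic_fetch_add (&isem->nwaiters, 1, __ATOMIC_RELAXED);
      for (;;)
        {
          /* Block until sem_post bumps the value (cancellation point). */
          int err = futex_wait (&isem->value, 0, isem->private);
          if (err != 0 && err != -EWOULDBLOCK)
            {
              __atomic_fetch_sub (&isem->nwaiters, 1, __ATOMIC_RELAXED);
              errno = -err;
              return -1;
            }
          val = isem->value;
          while (val != 0)
            if (__atomic_compare_exchange_n (&isem->value, &val, val - 1, 0,
                                             __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
              {
                __atomic_fetch_sub (&isem->nwaiters, 1, __ATOMIC_RELAXED);
                return 0;
              }
        }
    }
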
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
new file mode 100644
index 000000000..f768e16a7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
new file mode 100644
index 000000000..f768e16a7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 97f3b09e2..55add8b8e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,36 +20,87 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-#include <tls.h>
-
-#ifndef LOCK_INSTR
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
+# endif
#endif
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG); \
+ __asm__ ("andl %%gs:%P1, %0" : "+r" (__fl) \
+ : "i" (offsetof (struct pthread, header.private_futex))); \
+ __fl | (fl); }))
+# endif
+#endif
+#ifndef __ASSEMBLER__
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
#ifdef __PIC__
@@ -60,247 +111,436 @@ typedef int lll_lock_t;
# define LLL_EBX_REG "b"
#endif
-#define LLL_ENTER_KERNEL "int $0x80\n\t"
+#ifdef I386_USE_SYSENTER
+# ifdef SHARED
+# define LLL_ENTER_KERNEL "call *%%gs:%P6\n\t"
+# else
+# define LLL_ENTER_KERNEL "call *_dl_sysinfo\n\t"
+# endif
+#else
+# define LLL_ENTER_KERNEL "int $0x80\n\t"
+#endif
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-
-#define lll_futex_wait(futex, val) \
- lll_futex_timed_wait (futex, val, NULL)
-
-#define lll_futex_timed_wait(futex, val, timeout) \
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+ ".section .eh_frame,\"a\",@progbits\n" \
+"5:\t" ".long 7f-6f # Length of Common Information Entry\n" \
+"6:\t" ".long 0x0 # CIE Identifier Tag\n\t" \
+ ".byte 0x1 # CIE Version\n\t" \
+ ".ascii \"zR\\0\" # CIE Augmentation\n\t" \
+ ".uleb128 0x1 # CIE Code Alignment Factor\n\t" \
+ ".sleb128 -4 # CIE Data Alignment Factor\n\t" \
+ ".byte 0x8 # CIE RA Column\n\t" \
+ ".uleb128 0x1 # Augmentation size\n\t" \
+ ".byte 0x1b # FDE Encoding (pcrel sdata4)\n\t" \
+ ".byte 0xc # DW_CFA_def_cfa\n\t" \
+ ".uleb128 0x4\n\t" \
+ ".uleb128 0x0\n\t" \
+ ".align 4\n" \
+"7:\t" ".long 17f-8f # FDE Length\n" \
+"8:\t" ".long 8b-5b # FDE CIE offset\n\t" \
+ ".long 1b-. # FDE initial location\n\t" \
+ ".long 4b-1b # FDE address range\n\t" \
+ ".uleb128 0x0 # Augmentation size\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 10f-9f\n" \
+"9:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 12f-11f\n" \
+"11:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-2b\n" \
+"12:\t" ".byte 0x40 + (3b-2b-1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 16f-13f\n" \
+"13:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 15f-14f\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"14:\t" ".4byte 3b-.\n\t" \
+ ".byte 0x1c # DW_OP_minus\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"15:\t" ".4byte 18f-.\n\t" \
+ ".byte 0x22 # DW_OP_plus\n" \
+"16:\t" ".align 4\n" \
+"17:\t" ".previous\n"
+
+/* Unwind info for
+ 1: lea ..., ...
+ 2: call ...
+ 3: jmp 18f
+ 4:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_3 \
+LLL_STUB_UNWIND_INFO_START \
+"10:\t" ".byte 0x40 + (2b-1b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+ 1: lea ..., ...
+ 0: movl ..., ...
+ 2: call ...
+ 3: jmp 18f
+ 4:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_4 \
+LLL_STUB_UNWIND_INFO_START \
+"10:\t" ".byte 0x40 + (0b-1b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x8\n\t" \
+ ".uleb128 20f-19f\n" \
+"19:\t" ".byte 0x78 # DW_OP_breg8\n\t" \
+ ".sleb128 3b-0b\n" \
+"20:\t" ".byte 0x40 + (2b-0b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait (futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
({ \
- int __ret; \
- register __typeof (val) _val __asm__ ("edx") = (val); \
+ int __status; \
+ register __typeof (val) _val __asm__ ("edx") = (val); \
__asm__ __volatile (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
- : "=a" (__ret) \
+ : "=a" (__status) \
: "0" (SYS_futex), LLL_EBX_REG (futex), "S" (timeout), \
- "c" (FUTEX_WAIT), "d" (_val), \
- "i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
+ "c" (__lll_private_flag (FUTEX_WAIT, private)), \
+ "d" (_val), "i" (offsetof (tcbhead_t, sysinfo)) \
+ : "memory"); \
+ __status; \
+ })
-#define lll_futex_wake(futex, nr) \
- ({ \
- int __ret; \
+#define lll_futex_wake(futex, nr, private) \
+ do { \
+ int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
__asm__ __volatile (LLL_EBX_LOAD \
LLL_ENTER_KERNEL \
LLL_EBX_LOAD \
- : "=a" (__ret) \
+ : "=a" (__ignore) \
: "0" (SYS_futex), LLL_EBX_REG (futex), \
- "c" (FUTEX_WAKE), "d" (_nr), \
+ "c" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr), \
"i" (0) /* phony, to align next arg's number */, \
"i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
-
-
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-/* Does not preserve %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime)
- __attribute ((regparm (3))) attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
+ } while (0)
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
    after the cmpxchg instruction. In case the operation succeeded this
value is zero. In case the operation failed, the cmpxchg instruction
    has loaded the current value of the memory word which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, %%gs:%P5\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %2, %1"
+#endif
+
+#define lll_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (__lll_trylock_asm \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ ret; })
+
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %1, %2\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, %%gs:%P6\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tcmpxchgl %1, %2\n\t"
+#endif
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=,@function\n" \
- "_L_mutex_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex) \
- : "memory"); })
-
-
-/* Special version of lll_mutex_lock which causes the unlock function to
- always wakeup waiters. */
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_cond_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_cond_lock_%=,@function\n" \
- "_L_mutex_cond_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (2), "m" (futex) \
- : "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int _result, ignore1, ignore2; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "jnz _L_mutex_timedlock_%=\n\t" \
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%ecx\n" \
+ "2:\tcall __lll_lock_wait_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
+ : "0" (0), "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore3; \
+ __asm__ __volatile (__lll_lock_asm_start \
+ "jnz _L_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=,@function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %8, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), \
+ "=m" (futex), "=&d" (ignore3) \
+ : "1" (1), "m" (futex), \
+ "i" (MULTIPLE_THREADS_OFFSET), "0" (0), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ } \
+ })
+
+#define lll_robust_lock(futex, id, private) \
+ ({ int __result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_robust_lock_%=\n\t" \
".subsection 1\n\t" \
- ".type _L_mutex_timedlock_%=,@function\n" \
- "_L_mutex_timedlock_%=:\n\t" \
- "leal %3, %%ecx\n\t" \
- "movl %7, %%edx\n\t" \
- "call __lll_mutex_timedlock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
+ ".type _L_robust_lock_%=,@function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_lock_%=, 4b-1b\n\t" \
".previous\n" \
- "1:" \
- : "=a" (_result), "=c" (ignore1), "=&d" (ignore2), \
- "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), "m" (timeout) \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id), "m" (futex), "g" ((int) (private))\
: "memory"); \
- _result; })
-
-
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile (LOCK_INSTR "subl $1,%0\n\t" \
- "jne _L_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=,@function\n" \
- "_L_mutex_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_mutex_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex) \
- : "memory"); })
-
-
-#define lll_mutex_islocked(futex) \
- (futex != 0)
-
-
-extern int __lll_lock_wait (int val, int *__futex)
- __attribute ((regparm (2))) attribute_hidden;
-extern int __lll_unlock_wake (int *__futex)
- __attribute ((regparm (1))) attribute_hidden;
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
+ __result; })
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
+/* Special version of lll_lock which causes the unlock function to
+ always wakeup waiters. */
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_cond_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=,@function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_cond_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (ignore1), "=c" (ignore2), "=m" (futex), \
+ "=&d" (ignore3) \
+ : "0" (0), "1" (2), "m" (futex), "g" ((int) (private))\
+ : "memory"); \
+ })
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int __result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
+ "jnz _L_robust_cond_lock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_cond_lock_%=,@function\n" \
+ "_L_robust_cond_lock_%=:\n" \
+ "1:\tleal %2, %%edx\n" \
+ "0:\tmovl %7, %%ecx\n" \
+ "2:\tcall __lll_robust_lock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_cond_lock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=m" (futex), \
+ "=&d" (ignore2) \
+ : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ __result; })
-#if defined NOT_IN_libc
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
-#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used. */
+#define lll_timedlock(futex, timeout, private) \
+ ({ int __result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ "jnz _L_timedlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_timedlock_%=,@function\n" \
+ "_L_timedlock_%=:\n" \
+ "1:\tleal %3, %%ecx\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_timedlock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_timedlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=&d" (ignore2), \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "4" ((int) (private)) \
+ : "memory"); \
+ __result; })
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm__ __volatile ("cmpl $0, %%gs:%P5\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %2, %1; setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int __result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
+ "jnz _L_robust_timedlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_timedlock_%=,@function\n" \
+ "_L_robust_timedlock_%=:\n" \
+ "1:\tleal %3, %%ecx\n" \
+ "0:\tmovl %8, %%edx\n" \
+ "2:\tcall __lll_robust_timedlock_wait\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_timedlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=a" (__result), "=c" (ignore1), "=&d" (ignore2), \
+ "=m" (futex), "=S" (ignore3) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "4" ((int) (private)) \
: "memory"); \
- ret; })
+ __result; })
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2; \
- __asm__ __volatile ("cmpl $0, %%gs:%P6\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tcmpxchgl %1, %2\n\t" \
- "jnz _L_mutex_lock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_lock_%=,@function\n" \
- "_L_mutex_lock_%=:\n\t" \
- "leal %2, %%ecx\n\t" \
- "call __lll_mutex_lock_wait\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=a" (ignore1), "=c" (ignore2), "=m" (futex) \
- : "0" (0), "1" (1), "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile ("cmpl $0, %%gs:%P3\n\t" \
- "je,pt 0f\n\t" \
- "lock\n" \
- "0:\tsubl $1,%0\n\t" \
- "jne _L_mutex_unlock_%=\n\t" \
- ".subsection 1\n\t" \
- ".type _L_mutex_unlock_%=,@function\n" \
- "_L_mutex_unlock_%=:\n\t" \
- "leal %0, %%eax\n\t" \
- "call __lll_mutex_unlock_wake\n\t" \
- "jmp 1f\n\t" \
- ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
- ".previous\n" \
- "1:" \
- : "=m" (futex), "=&a" (ignore) \
- : "m" (futex), \
- "i" (offsetof (tcbhead_t, multiple_threads)) \
- : "memory"); })
+#if defined NOT_IN_libc || defined UP
+# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
+#else
+# define __lll_unlock_asm "cmpl $0, %%gs:%P3\n\t" \
+ "je 0f\n\t" \
+ "lock\n" \
+ "0:\tsubl $1,%0\n\t"
#endif
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "2:\tcall __lll_unlock_wake_private\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_3 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore) \
+ : "m" (futex), "i" (MULTIPLE_THREADS_OFFSET) \
+ : "memory"); \
+ else \
+ { \
+ int ignore2; \
+ __asm__ __volatile (__lll_unlock_asm \
+ "jne _L_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=,@function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (MULTIPLE_THREADS_OFFSET), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ } \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ (void) \
+ ({ int ignore, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t" \
+ "jne _L_robust_unlock_%=\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=,@function\n" \
+ "_L_robust_unlock_%=:\n\t" \
+ "1:\tleal %0, %%eax\n" \
+ "0:\tmovl %5, %%ecx\n" \
+ "2:\tcall __lll_unlock_wake\n" \
+ "3:\tjmp 18f\n" \
+ "4:\t.size _L_robust_unlock_%=, 4b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_4 \
+ "18:" \
+ : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "g" ((int) (private)) \
+ : "memory"); \
+ })
+
+
+#define lll_robust_dead(futex, private) \
+ (void) \
+ ({ int __ignore; \
+ register int _nr __asm__ ("edx") = 1; \
+ __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t" \
+ LLL_EBX_LOAD \
+ LLL_ENTER_KERNEL \
+ LLL_EBX_LOAD \
+ : "=a" (__ignore) \
+ : "0" (SYS_futex), LLL_EBX_REG (&(futex)), \
+ "c" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "d" (_nr), "i" (FUTEX_OWNER_DIED), \
+ "i" (offsetof (tcbhead_t, sysinfo))); \
+ })
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
-
 /* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -308,21 +548,22 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
The macro parameter must not have any side effect. */
#define lll_wait_tid(tid) \
- ({ \
- int __ret; \
- register __typeof (tid) _tid __asm__ ("edx") = (tid); \
+ do { \
+ int __ignore; \
+ register __typeof (tid) _tid __asm__ ("edx") = (tid); \
if (_tid != 0) \
__asm__ __volatile (LLL_EBX_LOAD \
"1:\tmovl %1, %%eax\n\t" \
LLL_ENTER_KERNEL \
"cmpl $0, (%%ebx)\n\t" \
- "jne,pn 1b\n\t" \
+ "jne 1b\n\t" \
LLL_EBX_LOAD \
- : "=&a" (__ret) \
+ : "=&a" (__ignore) \
: "i" (SYS_futex), LLL_EBX_REG (&tid), "S" (0), \
"c" (FUTEX_WAIT), "d" (_tid), \
- "i" (offsetof (tcbhead_t, sysinfo))); \
- __ret; })
+ "i" (offsetof (tcbhead_t, sysinfo)) \
+ : "memory"); \
+ } while (0)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
__attribute__ ((regparm (2))) attribute_hidden;
@@ -338,28 +579,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- __attribute ((regparm (2))) attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- __attribute ((regparm (1))) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
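
Note: the reworked lowlevellock.h threads a `private' argument through every locking macro. The LLL_PRIVATE/LLL_SHARED comment above warns that the values look backwards; the point is that __lll_private_flag can then fold the flag in with a single OR and XOR when __ASSUME_PRIVATE_FUTEX is defined. A small stand-alone check, with the constants copied from the header:

    #include <assert.h>

    #define FUTEX_WAIT            0
    #define FUTEX_PRIVATE_FLAG  128
    #define LLL_PRIVATE           0
    #define LLL_SHARED          FUTEX_PRIVATE_FLAG

    #define __lll_private_flag(fl, private) \
      (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

    int main (void)
    {
      /* A process-private lock really does pass FUTEX_PRIVATE_FLAG
         to the kernel ... */
      assert (__lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
              == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
      /* ... and a process-shared one does not. */
      assert (__lll_private_flag (FUTEX_WAIT, LLL_SHARED) == FUTEX_WAIT);
      return 0;
    }
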
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
index 5bdba3f51..6557359b4 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/not-cancel.h
@@ -1,5 +1,5 @@
/* Uncancelable versions of cancelable interfaces. Linux/NPTL version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -26,20 +26,21 @@ extern int __close_nocancel (int) attribute_hidden;
extern int __read_nocancel (int, void *, size_t) attribute_hidden;
extern int __write_nocancel (int, const void *, size_t) attribute_hidden;
extern pid_t __waitpid_nocancel (pid_t, int *, int) attribute_hidden;
-
-libc_hidden_proto(__open_nocancel)
-libc_hidden_proto(__close_nocancel)
-libc_hidden_proto(__read_nocancel)
-libc_hidden_proto(__write_nocancel)
-libc_hidden_proto(__waitpid_nocancel)
-
+extern int __openat_nocancel (int fd, const char *fname, int oflag,
+ mode_t mode) attribute_hidden;
+extern int __openat64_nocancel (int fd, const char *fname, int oflag,
+ mode_t mode) attribute_hidden;
#else
-#define __open_nocancel(name, ...) open (name, __VA_ARGS__)
-#define __close_nocancel(fd) close (fd)
-#define __read_nocancel(fd, buf, len) read (fd, buf, len)
-#define __write_nocancel(fd, buf, len) write (fd, buf, len)
-#define __waitpid_nocancel(pid, stat_loc, options) \
- waitpid (pid, stat_loc, options)
+# define __open_nocancel(name, ...) __open (name, __VA_ARGS__)
+# define __close_nocancel(fd) __close (fd)
+# define __read_nocancel(fd, buf, len) __read (fd, buf, len)
+# define __write_nocancel(fd, buf, len) __write (fd, buf, len)
+# define __waitpid_nocancel(pid, stat_loc, options) \
+ __waitpid (pid, stat_loc, options)
+# define __openat_nocancel(fd, fname, oflag, mode) \
+ openat (fd, fname, oflag, mode)
+# define __openat64_nocancel(fd, fname, oflag, mode) \
+ openat64 (fd, fname, oflag, mode)
#endif
/* Uncancelable open. */
@@ -48,6 +49,16 @@ libc_hidden_proto(__waitpid_nocancel)
#define open_not_cancel_2(name, flags) \
__open_nocancel (name, flags)
+/* Uncancelable openat. */
+#define openat_not_cancel(fd, fname, oflag, mode) \
+ __openat_nocancel (fd, fname, oflag, mode)
+#define openat_not_cancel_3(fd, fname, oflag) \
+ __openat_nocancel (fd, fname, oflag, 0)
+#define openat64_not_cancel(fd, fname, oflag, mode) \
+ __openat64_nocancel (fd, fname, oflag, mode)
+#define openat64_not_cancel_3(fd, fname, oflag) \
+ __openat64_nocancel (fd, fname, oflag, 0)
+
/* Uncancelable close. */
#define close_not_cancel(fd) \
__close_nocancel (fd)
@@ -80,3 +91,15 @@ libc_hidden_proto(__waitpid_nocancel)
# define waitpid_not_cancel(pid, stat_loc, options) \
INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
#endif
+
+/* Uncancelable pause. */
+#define pause_not_cancel() \
+ __pause_nocancel ()
+
+/* Uncancelable nanosleep. */
+#define nanosleep_not_cancel(requested_time, remaining) \
+ __nanosleep_nocancel (requested_time, remaining)
+
+/* Uncancelable sigsuspend. */
+#define sigsuspend_not_cancel(set) \
+ __sigsuspend_nocancel (set)
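
Note: the not-cancel.h additions follow the existing pattern: each *_not_cancel macro expands either to a hidden __*_nocancel entry point (multi-threaded libc) or to the plain function, so internal callers can do I/O without creating cancellation points. An illustrative internal caller in the style of the SMP probe further down (the function name and buffer handling are made up; the read/close wrappers are the ones this header already provides):

    static int read_kernel_version (char *buf, size_t len)
    {
      /* None of these calls is a cancellation point. */
      int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
      if (fd == -1)
        return -1;
      ssize_t n = read_not_cancel (fd, buf, len - 1);
      close_not_cancel_no_status (fd);
      if (n <= 0)
        return -1;
      buf[n] = '\0';
      return 0;
    }
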
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
index 939538927..7ab222e1b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pt-vfork.S
@@ -17,6 +17,10 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+#include <sysdep.h>
+#define _ERRNO_H 1
+#include <bits/errno.h>
+#include <bits/kernel-features.h>
#include <tcb-offsets.h>
/* Save the PID value. */
@@ -33,4 +37,32 @@
movl %edx, %gs:PID; \
1:
-#include <../../../../../../../libc/sysdeps/linux/i386/vfork.S>
+/* Clone the calling process, but without copying the whole address space.
+ The calling process is suspended until the new process exits or is
+ replaced by a call to `execve'. Return -1 for errors, 0 to the new process,
+ and the process ID of the new process to the old process. */
+
+ENTRY (__vfork)
+ /* Pop the return PC value into ECX. */
+ popl %ecx
+
+ SAVE_PID
+
+ /* Stuff the syscall number in EAX and enter into the kernel. */
+ movl $SYS_ify (vfork), %eax
+ int $0x80
+
+ RESTORE_PID
+
+ /* Jump to the return PC. Don't jump directly since this
+ disturbs the branch target cache. Instead push the return
+ address back on the stack. */
+ pushl %ecx
+
+ cmpl $-4095, %eax
+ jae SYSCALL_ERROR_LABEL /* Branch forward if it failed. */
+.Lpseudo_end:
+ ret
+PSEUDO_END (__vfork)
+
+weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
index 5ab2c5856..9a3b36303 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/pthread_once.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,14 +19,9 @@
#include <unwindbuf.h>
#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define FUTEX_WAKE 1
.comm __fork_generation, 4, 4
@@ -89,7 +84,16 @@ __pthread_once:
jnz 3f /* Different for generation -> run initializer. */
/* Somebody else got here first. Wait. */
- movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %ecx
+#else
+# if FUTEX_WAIT == 0
+ movl %gs:PRIVATE_FUTEX, %ecx
+# else
+ movl $FUTEX_WAIT, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+# endif
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
jmp 6b
@@ -130,7 +134,12 @@ __pthread_once:
/* Wake up all other threads. */
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
movl $FUTEX_WAKE, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
@@ -151,7 +160,12 @@ __pthread_once:
movl $0, (%ebx)
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %ecx
+#else
movl $FUTEX_WAKE, %ecx
+ orl %gs:PRIVATE_FUTEX, %ecx
+#endif
movl $SYS_futex, %eax
ENTER_KERNEL
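
Note: the pthread_once.S hunks are purely about selecting the futex operation word: with __ASSUME_PRIVATE_FUTEX the FUTEX_PRIVATE_FLAG is hard-coded, otherwise the per-thread PRIVATE_FUTEX word (either 0 or FUTEX_PRIVATE_FLAG, set up at startup) is OR-ed in through %gs. The same selection in C, using the THREAD_GETMEM accessor that appears in lowlevellock.h above:

    static int once_futex_op (int op)   /* op is FUTEX_WAIT or FUTEX_WAKE */
    {
    #ifdef __ASSUME_PRIVATE_FUTEX
      return op | FUTEX_PRIVATE_FLAG;
    #else
      /* header.private_futex is 0 or FUTEX_PRIVATE_FLAG, depending on
         whether the running kernel supports private futexes. */
      return op | THREAD_GETMEM (THREAD_SELF, header.private_futex);
    #endif
    }
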
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
index 2c0cbe99a..f68a0c075 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/smp.h
@@ -1,5 +1,5 @@
-/* Determine whether the host has multiple processors. SH version.
- Copyright (C) 2002 Free Software Foundation, Inc.
+/* Determine whether the host has multiple processors. Linux version.
+ Copyright (C) 1996, 2002, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,8 +17,40 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/utsname.h>
+#include <not-cancel.h>
+
+/* Test whether the machine has more than one processor. This is not the
+ best test but good enough. More complicated tests would require `malloc'
+ which is not available at that time. */
static inline int
is_smp_system (void)
{
- return 0;
+ union
+ {
+ struct utsname uts;
+ char buf[512];
+ } u;
+ char *cp;
+
+  /* Try reading the version string using `uname' first.  */
+ if (uname (&u.uts) == 0)
+ cp = u.uts.version;
+ else
+ {
+ /* This was not successful. Now try reading the /proc filesystem. */
+ int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
+ if (__builtin_expect (fd, 0) == -1
+ || read_not_cancel (fd, u.buf, sizeof (u.buf)) <= 0)
+ /* This also didn't work. We give up and say it's a UP machine. */
+ u.buf[0] = '\0';
+
+ close_not_cancel_no_status (fd);
+ cp = u.buf;
+ }
+
+ return strstr (cp, "SMP") != NULL;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
index f32c5bd20..cb8d6891c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
@@ -58,6 +58,7 @@
# define SAVE_OLDTYPE_3 SAVE_OLDTYPE_2
# define SAVE_OLDTYPE_4 SAVE_OLDTYPE_2
# define SAVE_OLDTYPE_5 SAVE_OLDTYPE_2
+# define SAVE_OLDTYPE_6 SAVE_OLDTYPE_2
# define PUSHCARGS_0 /* No arguments to push. */
# define DOCARGS_0 /* No arguments to frob. */
@@ -101,6 +102,14 @@
# define _POPCARGS_5 _POPCARGS_4; popl %edi; \
cfi_adjust_cfa_offset (-4); cfi_restore (edi);
+# define PUSHCARGS_6 _PUSHCARGS_6
+# define DOCARGS_6 _DOARGS_6 (44)
+# define POPCARGS_6 _POPCARGS_6
+# define _PUSHCARGS_6 pushl %ebp; cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (ebp, 0); _PUSHCARGS_5
+# define _POPCARGS_6 _POPCARGS_5; popl %ebp; \
+ cfi_adjust_cfa_offset (-4); cfi_restore (ebp);
+
# ifdef IS_IN_libpthread
# define CENABLE call __pthread_enable_asynccancel;
# define CDISABLE call __pthread_disable_asynccancel
@@ -122,6 +131,7 @@
# define POPSTATE_3 POPSTATE_2
# define POPSTATE_4 POPSTATE_3
# define POPSTATE_5 POPSTATE_4
+# define POPSTATE_6 POPSTATE_5
# ifndef __ASSEMBLER__
# define SINGLE_THREAD_P \
@@ -137,3 +147,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
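
Note: the new RTLD_SINGLE_THREAD_P mirrors SINGLE_THREAD_P but is intended for ld.so callers. Both exist so that cancellable syscall wrappers can skip the enable/disable-asynccancel round trip when only one thread is running; the usual shape of such a wrapper is sketched below (LIBC_CANCEL_ASYNC, LIBC_CANCEL_RESET and INLINE_SYSCALL are the standard sysdep macros, shown only to illustrate the pattern, not code from this patch):

    ssize_t sketch_read (int fd, void *buf, size_t nbytes)
    {
      if (SINGLE_THREAD_P)
        /* No other thread can cancel us; issue the syscall directly. */
        return INLINE_SYSCALL (read, 3, fd, buf, nbytes);

      int oldtype = LIBC_CANCEL_ASYNC ();
      ssize_t result = INLINE_SYSCALL (read, 3, fd, buf, nbytes);
      LIBC_CANCEL_RESET (oldtype);
      return result;
    }
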
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
index dc7fb2ec4..b39099af5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/vfork.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 1999, 2002, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1999,2002,2004,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h
index 07ee9d7dd..add20b6f7 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/internaltypes.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -21,7 +21,6 @@
#define _INTERNALTYPES_H 1
#include <stdint.h>
-#include <sched.h>
struct pthread_attr
@@ -77,9 +76,11 @@ struct pthread_condattr
/* The __NWAITERS field is used as a counter and to house the number
- of bits which represent the clock. COND_CLOCK_BITS is the number
- of bits reserved for the clock. */
-#define COND_CLOCK_BITS 1
+ of bits for other purposes. COND_CLOCK_BITS is the number
+ of bits needed to represent the ID of the clock. COND_NWAITERS_SHIFT
+ is the number of bits reserved for other purposes like the clock. */
+#define COND_CLOCK_BITS 1
+#define COND_NWAITERS_SHIFT 1
/* Read-write lock variable attribute data structure. */
@@ -97,6 +98,7 @@ struct pthread_barrier
int lock;
unsigned int left;
unsigned int init_count;
+ int private;
};
@@ -138,9 +140,16 @@ struct pthread_key_struct
/* Semaphore variable structure. */
-struct sem
+struct new_sem
{
- unsigned int count;
+ unsigned int value;
+ int private;
+ unsigned long int nwaiters;
+};
+
+struct old_sem
+{
+ unsigned int value;
};
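
Note: struct sem becomes struct new_sem, gaining a `private' word (the futex flag) and an nwaiters count, while struct old_sem keeps the single-word layout for the old ABI. The point of nwaiters is that sem_post can skip the FUTEX_WAKE syscall entirely when nobody is blocked; roughly (futex_wake () is an illustrative stand-in for the wake primitive):

    int sketch_sem_post (struct new_sem *isem)
    {
      /* Publish the new token first ... */
      __atomic_fetch_add (&isem->value, 1, __ATOMIC_SEQ_CST);
      /* ... then wake a waiter only if the count says one may exist. */
      if (isem->nwaiters > 0)
        futex_wake (&isem->value, 1, isem->private);
      return 0;
    }
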
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c b/libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c
index c435eff30..f2795510a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/jmp-unwind.c
@@ -1,5 +1,5 @@
/* Clean up stack frames unwound by longjmp. Linux version.
- Copyright (C) 1995, 1997, 2002, 2003 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1997, 2002, 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -29,11 +29,11 @@ void
_longjmp_unwind (jmp_buf env, int val)
{
#ifdef SHARED
-# define fptr __libc_pthread_functions.ptr___pthread_cleanup_upto
+ if (__libc_pthread_functions_init)
+ PTHFCT_CALL (ptr___pthread_cleanup_upto, (env->__jmpbuf,
+ CURRENT_STACK_FRAME));
#else
-# define fptr __pthread_cleanup_upto
+ if (__pthread_cleanup_upto != NULL)
+ __pthread_cleanup_upto (env->__jmpbuf, CURRENT_STACK_FRAME);
#endif
-
- if (fptr != NULL)
- fptr (env->__jmpbuf, CURRENT_STACK_FRAME);
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
index 4ad252800..016437768 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/libc_pthread_init.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002,2003,2005,2006,2007,2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,13 +19,15 @@
#include <unistd.h>
#include <list.h>
-#include "fork.h"
+#include <fork.h>
+#include <dl-sysdep.h>
#include <tls.h>
#include <string.h>
#include <pthreadP.h>
#include <bits/libc-lock.h>
+#include <sysdep.h>
+#include <ldsodefs.h>
-libc_hidden_proto(memcpy)
#ifdef TLS_MULTIPLE_THREADS_IN_TCB
void
@@ -46,14 +48,40 @@ __libc_pthread_init (
__register_atfork (NULL, NULL, reclaim, NULL);
#ifdef SHARED
- /* We copy the content of the variable pointed to by the FUNCTIONS
- parameter to one in libc.so since this means access to the array
- can be done with one memory access instead of two. */
- memcpy (&__libc_pthread_functions, functions,
- sizeof (__libc_pthread_functions));
+ /* Copy the function pointers into an array in libc. This enables
+ access with just one memory reference but moreso, it prevents
+ hijacking the function pointers with just one pointer change. We
+ "encrypt" the function pointers since we cannot write-protect the
+ array easily enough. */
+ union ptrhack
+ {
+ struct pthread_functions pf;
+# define NPTRS (sizeof (struct pthread_functions) / sizeof (void *))
+ void *parr[NPTRS];
+ } __attribute__ ((may_alias)) const *src;
+ union ptrhack *dest;
+
+ src = (const void *) functions;
+ dest = (void *) &__libc_pthread_functions;
+
+ for (size_t cnt = 0; cnt < NPTRS; ++cnt)
+ {
+ void *p = src->parr[cnt];
+ PTR_MANGLE (p);
+ dest->parr[cnt] = p;
+ }
+ __libc_pthread_functions_init = 1;
#endif
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
return &__libc_multiple_threads;
#endif
}
+
+#ifdef SHARED
+libc_freeres_fn (freeres_libptread)
+{
+ if (__libc_pthread_functions_init)
+ PTHFCT_CALL (ptr_freeres, ());
+}
+#endif
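
Note: "encrypting" the table means each slot of __libc_pthread_functions holds PTR_MANGLE (fn) rather than fn, and PTHFCT_CALL demangles before calling, so overwriting a slot with a plain function address is useless without the guard value. On i386 the mangling is an XOR with a guard kept in the thread descriptor followed by a 9-bit rotate; a stand-alone rendering (the guard here is only a placeholder for the value normally chosen at startup):

    #include <stdint.h>

    static uintptr_t pointer_guard;     /* normally random, set up early */

    static void *ptr_mangle (void *p)
    {
      uintptr_t v = (uintptr_t) p ^ pointer_guard;
      return (void *) ((v << 9) | (v >> (8 * sizeof v - 9)));  /* rol $9 */
    }

    static void *ptr_demangle (void *p)
    {
      uintptr_t v = (uintptr_t) p;
      v = (v >> 9) | (v << (8 * sizeof v - 9));                /* ror $9 */
      return (void *) (v ^ pointer_guard);
    }
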
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym
index 36e28eb2a..cfe22b089 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelbarrier.sym
@@ -9,3 +9,4 @@ CURR_EVENT offsetof (struct pthread_barrier, curr_event)
MUTEX offsetof (struct pthread_barrier, lock)
LEFT offsetof (struct pthread_barrier, left)
INIT_COUNT offsetof (struct pthread_barrier, init_count)
+PRIVATE offsetof (struct pthread_barrier, private)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym
index c5e797806..18e1adad4 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelcond.sym
@@ -13,4 +13,4 @@ wakeup_seq offsetof (pthread_cond_t, __data.__wakeup_seq)
woken_seq offsetof (pthread_cond_t, __data.__woken_seq)
dep_mutex offsetof (pthread_cond_t, __data.__mutex)
broadcast_seq offsetof (pthread_cond_t, __data.__broadcast_seq)
-clock_bits COND_CLOCK_BITS
+nwaiters_shift COND_NWAITERS_SHIFT
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
index f93891431..f459bcf6e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevellock.c
@@ -1,5 +1,5 @@
/* low level locking for pthread library. Generic futex-using version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -25,34 +25,46 @@
void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
{
- do
- {
- int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
- if (oldval != 0)
- lll_futex_wait (futex, 2);
- }
- while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
+ if (*futex == 2)
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+
+ while (atomic_exchange_acq (futex, 2) != 0)
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
+}
+
+
+/* These functions don't get included in libc.so */
+#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+ if (*futex == 2)
+ lll_futex_wait (futex, 2, private);
+
+ while (atomic_exchange_acq (futex, 2) != 0)
+ lll_futex_wait (futex, 2, private);
}
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
return EINVAL;
- do
+ /* Try locking. */
+ while (atomic_exchange_acq (futex, 2) != 0)
{
struct timeval tv;
- struct timespec rt;
/* Get the current time. */
(void) gettimeofday (&tv, NULL);
/* Compute relative timeout. */
+ struct timespec rt;
rt.tv_sec = abstime->tv_sec - tv.tv_sec;
rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
if (rt.tv_nsec < 0)
@@ -61,30 +73,12 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
--rt.tv_sec;
}
- /* Already timed out? */
if (rt.tv_sec < 0)
return ETIMEDOUT;
/* Wait. */
- int oldval = atomic_compare_and_exchange_val_acq (futex, 2, 1);
- if (oldval != 0)
- lll_futex_timed_wait (futex, 2, &rt);
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
- while (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0);
-
- return 0;
-}
-
-
-/* These don't get included in libc.so */
-#ifdef IS_IN_libpthread
-int
-lll_unlock_wake_cb (int *futex)
-{
- int val = atomic_exchange_rel (futex, 0);
-
- if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1);
return 0;
}
@@ -105,7 +99,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
struct timespec rt;
/* Get the current time. */
- (void) gettimeofday (&tv, NULL);
+ (void) __gettimeofday (&tv, NULL);
/* Compute relative timeout. */
rt.tv_sec = abstime->tv_sec - tv.tv_sec;
@@ -120,12 +114,12 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
if (rt.tv_sec < 0)
return ETIMEDOUT;
- /* Wait until thread terminates. */
- if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+ /* Wait until thread terminates. The kernel so far does not use
+ the private futex operations for this. */
+ if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
return ETIMEDOUT;
}
return 0;
}
-
#endif
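
Note: the generic lowlevellock.c now uses the classic three-state futex lock: 0 unlocked, 1 locked with no waiters, 2 locked with waiters. A blocked thread unconditionally swaps in 2 before sleeping and owns the lock when the swap returns 0; the matching unlock only enters the kernel when it swaps out a 2. Spelled out with the same helper names used in the file above:

    static void sketch_lock (int *futex, int private)
    {
      if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
        {
          /* Contended: advertise a waiter, then sleep until the swap
             reports the lock was free. */
          while (atomic_exchange_acq (futex, 2) != 0)
            lll_futex_wait (futex, 2, private);
        }
    }

    static void sketch_unlock (int *futex, int private)
    {
      /* Only pay for the syscall when a waiter advertised itself. */
      if (atomic_exchange_rel (futex, 0) > 1)
        lll_futex_wake (futex, 1, private);
    }
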
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym
new file mode 100644
index 000000000..2f1e9da52
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym
@@ -0,0 +1,6 @@
+#include <stddef.h>
+#include <pthreadP.h>
+
+--
+
+TID offsetof (struct pthread, tid)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym
index e82c878d3..f50b25bfb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/lowlevelrwlock.sym
@@ -1,6 +1,7 @@
#include <stddef.h>
#include <stdio.h>
#include <bits/pthreadtypes.h>
+#include <bits/wordsize.h>
--
@@ -12,3 +13,4 @@ READERS_QUEUED offsetof (pthread_rwlock_t, __data.__nr_readers_queued)
WRITERS_QUEUED offsetof (pthread_rwlock_t, __data.__nr_writers_queued)
FLAGS offsetof (pthread_rwlock_t, __data.__flags)
WRITER offsetof (pthread_rwlock_t, __data.__writer)
+PSHARED offsetof (pthread_rwlock_t, __data.__shared)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
index f112b8a39..166a6c6ae 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/pthreadtypes.h
@@ -1,5 +1,5 @@
/* Machine-specific pthread type layouts. MIPS version.
- Copyright (C) 2005 Free Software Foundation, Inc.
+ Copyright (C) 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -20,7 +20,7 @@
#ifndef _BITS_PTHREADTYPES_H
#define _BITS_PTHREADTYPES_H 1
-#include <sgidefs.h>
+#include <endian.h>
#if _MIPS_SIM == _ABI64
# define __SIZEOF_PTHREAD_ATTR_T 56
@@ -56,6 +56,7 @@ typedef union
long int __align;
} pthread_attr_t;
+
#if _MIPS_SIM == _ABI64
typedef struct __pthread_internal_list
{
@@ -69,6 +70,7 @@ typedef struct __pthread_internal_slist
} __pthread_slist_t;
#endif
+
/* Data structures for mutex handling. The structure of the attribute
type is deliberately not exposed. */
typedef union
@@ -87,7 +89,7 @@ typedef union
#if _MIPS_SIM == _ABI64
int __spins;
__pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV 1
+# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
@@ -157,9 +159,9 @@ typedef union
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
int __writer;
- int __pad1;
+ int __shared;
+ unsigned long int __pad1;
unsigned long int __pad2;
- unsigned long int __pad3;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
@@ -173,9 +175,21 @@ typedef union
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __shared;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
+#else
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
+#endif
int __writer;
} __data;
# endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
index c4440f9e9..af43a6048 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/bits/semaphore.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -29,9 +29,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
index 7edb28794..01bcf4120 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/lowlevellock.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008,
+ 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -24,160 +25,260 @@
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
-
+#include <bits/kernel-features.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-
-#define lll_futex_wait(futexp, val) \
+#define FUTEX_WAKE_OP 5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
+
+
+#define lll_futex_wait(futexp, val, private) \
+ lll_futex_timed_wait(futexp, val, NULL, private)
+
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), 0); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
+ __lll_private_flag (FUTEX_WAIT, private), \
+ (val), (timespec)); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), (timespec)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp), \
+ __lll_private_flag (FUTEX_WAKE, private), \
+ (nr), 0); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+ do \
+ { \
+ int *__futexp = &(futexv); \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ lll_futex_wake (__futexp, 1, private); \
+ } \
+ while (0)
+
+/* Returns non-zero if error happened, zero if success. */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAKE, (nr), 0); \
- INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp), \
+ __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+ (nr_wake), (nr_move), (mutex), (val)); \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 6, \
- (futexp), FUTEX_CMP_REQUEUE, (nr_wake), \
- (nr_move), (mutex), (val)); \
+ \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
+ (nr_wake), (nr_wake2), (futexp2), \
+ FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
-
static inline int __attribute__((always_inline))
-__lll_mutex_trylock(int *futex)
+__lll_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
-#define lll_mutex_trylock(lock) __lll_mutex_trylock (&(lock))
+#define lll_trylock(lock) __lll_trylock (&(lock))
static inline int __attribute__((always_inline))
-__lll_mutex_cond_trylock(int *futex)
+__lll_cond_trylock(int *futex)
{
return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
-#define lll_mutex_cond_trylock(lock) __lll_mutex_cond_trylock (&(lock))
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-
-static inline void __attribute__((always_inline))
-__lll_mutex_lock(int *futex)
+static inline int __attribute__((always_inline))
+__lll_robust_trylock(int *futex, int id)
{
- if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- __lll_lock_wait (futex);
+ return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
-#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
+#define lll_robust_trylock(lock, id) \
+ __lll_robust_trylock (&(lock), id)
+
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define __lll_lock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, \
+ 1, 0), 0)) \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
+ } \
+ }))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
+
+
+#define __lll_robust_lock(futex, id, private) \
+ ({ \
+ int *__futex = (futex); \
+ int __val = 0; \
+ \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
+ })
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void __attribute__ ((always_inline))
-__lll_mutex_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
- __lll_lock_wait (futex);
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
+
+
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-extern int __lll_timedlock_wait (int *futex, const struct timespec *)
- attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
static inline int __attribute__ ((always_inline))
-__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
- result = __lll_timedlock_wait (futex, abstime);
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_mutex_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock (int *futex)
-{
- int val = atomic_exchange_rel (futex, 0);
- if (__builtin_expect (val > 1, 0))
- lll_futex_wake (futex, 1);
-}
-#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
-
-
-static inline void __attribute__ ((always_inline))
-__lll_mutex_unlock_force (int *futex)
+static inline int __attribute__ ((always_inline))
+__lll_robust_timedlock (int *futex, const struct timespec *abstime,
+ int id, int private)
{
- (void) atomic_exchange_rel (futex, 0);
- lll_futex_wake (futex, 1);
+ int result = 0;
+ if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
+ result = __lll_robust_timedlock_wait (futex, abstime, private);
+ return result;
}
-#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
-
-
-#define lll_mutex_islocked(futex) \
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
+
+
+#define __lll_unlock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val > 1, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ }))
+#define lll_unlock(futex, private) __lll_unlock(&(futex), private)
+
+
+#define __lll_robust_unlock(futex, private) \
+ ((void) ({ \
+ int *__futex = (futex); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1, private); \
+ }))
+#define lll_robust_unlock(futex, private) \
+ __lll_robust_unlock(&(futex), private)
+
+
+#define lll_islocked(futex) \
(futex != 0)
/* Our internal lock implementation is identical to the binary-compatible
mutex implementation. */
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
/* The states of a lock are:
0 - untaken
1 - taken by one user
>1 - taken by more users */
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
afterwards. */
#define lll_wait_tid(tid) \
- do { \
- __typeof (tid) __tid; \
- while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid); \
+ do { \
+ __typeof (tid) __tid; \
+ while ((__tid = (tid)) != 0) \
+ lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -191,26 +292,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
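The LLL_PRIVATE/LLL_SHARED values defined above look inverted, but the __lll_private_flag macro XORs them back into the kernel's FUTEX_PRIVATE_FLAG convention. A small worked example of the __ASSUME_PRIVATE_FUTEX branch (constants copied from the header, helper name illustrative):

#define FUTEX_WAIT            0
#define FUTEX_PRIVATE_FLAG  128

#define LLL_PRIVATE 0                   /* process-private futex */
#define LLL_SHARED  FUTEX_PRIVATE_FLAG  /* process-shared futex */

/* __lll_private_flag (fl, private) with __ASSUME_PRIVATE_FUTEX is
   ((fl) | FUTEX_PRIVATE_FLAG) ^ (private).  */
static int
futex_op_with_private_bit (int fl, int private)
{
  return (fl | FUTEX_PRIVATE_FLAG) ^ private;
}

/* futex_op_with_private_bit (FUTEX_WAIT, LLL_PRIVATE) -> FUTEX_WAIT | FUTEX_PRIVATE_FLAG
   futex_op_with_private_bit (FUTEX_WAIT, LLL_SHARED)  -> FUTEX_WAIT
   so the kernel's "private" bit is set exactly when the lock is
   process-private, despite the backwards-looking LLL_* values.  */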
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
index 649b752f5..ddfd32bdb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/pthread_once.c
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
pthread_once_t *once_control = (pthread_once_t *) arg;
*once_control = 0;
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
@@ -65,7 +65,7 @@ __pthread_once (once_control, init_routine)
if (((oldval ^ newval) & -4) == 0)
{
/* Same generation, some other thread was faster. Wait. */
- lll_futex_wait (once_control, newval);
+ lll_futex_wait (once_control, newval, LLL_PRIVATE);
continue;
}
}
@@ -84,7 +84,7 @@ __pthread_once (once_control, init_routine)
atomic_increment (once_control);
/* Wake up all other threads. */
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
break;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
index 5fee89235..1cf625f4e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mips/sysdep-cancel.h
@@ -24,28 +24,38 @@
#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
-#ifdef __PIC__
+# ifdef __PIC__
+# define PSEUDO_CPLOAD .cpload t9;
+# define PSEUDO_ERRJMP la t9, __syscall_error; jr t9;
+# define PSEUDO_SAVEGP sw gp, 32(sp); cfi_rel_offset (gp, 32);
+# define PSEUDO_LOADGP lw gp, 32(sp);
+# else
+# define PSEUDO_CPLOAD
+# define PSEUDO_ERRJMP j __syscall_error;
+# define PSEUDO_SAVEGP
+# define PSEUDO_LOADGP
+# endif
+
# undef PSEUDO
# define PSEUDO(name, syscall_name, args) \
.align 2; \
L(pseudo_start): \
cfi_startproc; \
- 99: la t9,__syscall_error; \
- jr t9; \
+ 99: PSEUDO_ERRJMP \
.type __##syscall_name##_nocancel, @function; \
.globl __##syscall_name##_nocancel; \
__##syscall_name##_nocancel: \
.set noreorder; \
- .cpload t9; \
+ PSEUDO_CPLOAD \
li v0, SYS_ify(syscall_name); \
syscall; \
.set reorder; \
- bne a3, zero, SYSCALL_ERROR_LABEL; \
+ bne a3, zero, 99b; \
ret; \
.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
ENTRY (name) \
.set noreorder; \
- .cpload t9; \
+ PSEUDO_CPLOAD \
.set reorder; \
SINGLE_THREAD_P(v1); \
bne zero, v1, L(pseudo_cancel); \
@@ -53,17 +63,16 @@
li v0, SYS_ify(syscall_name); \
syscall; \
.set reorder; \
- bne a3, zero, SYSCALL_ERROR_LABEL; \
+ bne a3, zero, 99b; \
ret; \
L(pseudo_cancel): \
SAVESTK_##args; \
sw ra, 28(sp); \
cfi_rel_offset (ra, 28); \
- sw gp, 32(sp); \
- cfi_rel_offset (gp, 32); \
+ PSEUDO_SAVEGP \
PUSHARGS_##args; /* save syscall args */ \
CENABLE; \
- lw gp, 32(sp); \
+ PSEUDO_LOADGP \
sw v0, 44(sp); /* save mask */ \
POPARGS_##args; /* restore syscall args */ \
.set noreorder; \
@@ -74,12 +83,12 @@
sw a3, 40(sp); /* save syscall error flag */ \
lw a0, 44(sp); /* pass mask as arg1 */ \
CDISABLE; \
- lw gp, 32(sp); \
+ PSEUDO_LOADGP \
lw v0, 36(sp); /* restore syscall result */ \
lw a3, 40(sp); /* restore syscall error flag */ \
lw ra, 28(sp); /* restore return address */ \
.set noreorder; \
- bne a3, zero, SYSCALL_ERROR_LABEL; \
+ bne a3, zero, 99b; \
RESTORESTK; \
L(pseudo_end): \
.set reorder;
@@ -87,8 +96,6 @@
# undef PSEUDO_END
# define PSEUDO_END(sym) cfi_endproc; .end sym; .size sym,.-sym
-#endif
-
# define PUSHARGS_0 /* nothing to do */
# define PUSHARGS_1 PUSHARGS_0 sw a0, 0(sp); cfi_rel_offset (a0, 0);
# define PUSHARGS_2 PUSHARGS_1 sw a1, 4(sp); cfi_rel_offset (a1, 4);
@@ -135,19 +142,25 @@
# define RESTORESTK addu sp, STKSPACE; cfi_adjust_cfa_offset(-STKSPACE)
+# ifdef __PIC__
/* We use jalr rather than jal. This means that the assembler will not
automatically restore $gp (in case libc has multiple GOTs) so we must
do it manually - which we have to do anyway since we don't use .cprestore.
It also shuts up the assembler warning about not using .cprestore. */
+# define PSEUDO_JMP(sym) la t9, sym; jalr t9;
+# else
+# define PSEUDO_JMP(sym) jal sym;
+# endif
+
# ifdef IS_IN_libpthread
-# define CENABLE la t9, __pthread_enable_asynccancel; jalr t9;
-# define CDISABLE la t9, __pthread_disable_asynccancel; jalr t9;
+# define CENABLE PSEUDO_JMP (__pthread_enable_asynccancel)
+# define CDISABLE PSEUDO_JMP (__pthread_disable_asynccancel)
# elif defined IS_IN_librt
-# define CENABLE la t9, __librt_enable_asynccancel; jalr t9;
-# define CDISABLE la t9, __librt_disable_asynccancel; jalr t9;
+# define CENABLE PSEUDO_JMP (__librt_enable_asynccancel)
+# define CDISABLE PSEUDO_JMP (__librt_disable_asynccancel)
# else
-# define CENABLE la t9, __libc_enable_asynccancel; jalr t9;
-# define CDISABLE la t9, __libc_disable_asynccancel; jalr t9;
+# define CENABLE PSEUDO_JMP (__libc_enable_asynccancel)
+# define CDISABLE PSEUDO_JMP (__libc_disable_asynccancel)
# endif
# ifndef __ASSEMBLER__
@@ -167,3 +180,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
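Behind the assembler macros, the cancellable PSEUDO stub has a simple shape: skip the cancellation bookkeeping when the process is single-threaded, otherwise bracket the raw syscall with the enable/disable-asynccancel helpers. A rough C rendition of that control flow only (do_syscall and single_threaded_p are illustrative stand-ins for the syscall instruction and SINGLE_THREAD_P; the real code is the assembly above):

/* Control-flow sketch of the PSEUDO cancellation wrapper.  */
extern int __pthread_enable_asynccancel (void);   /* CENABLE  */
extern void __pthread_disable_asynccancel (int);  /* CDISABLE */
extern long do_syscall (long nr, long a0, long a1, long a2);
extern int single_threaded_p (void);

static long
cancellable_syscall (long nr, long a0, long a1, long a2)
{
  if (single_threaded_p ())
    /* Fast path: no other threads, no cancellation bookkeeping needed.  */
    return do_syscall (nr, a0, a1, a2);

  int oldtype = __pthread_enable_asynccancel ();
  long ret = do_syscall (nr, a0, a1, a2);   /* args saved/restored in asm */
  __pthread_disable_asynccancel (oldtype);
  return ret;
}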
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c b/libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c
index 9d16fee56..188040ee8 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/mq_notify.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2004, 2005, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2004.
@@ -29,6 +29,7 @@
#include <unistd.h>
#include <sys/socket.h>
#include <not-cancel.h>
+#include <bits/kernel-features.h>
#ifdef __NR_mq_notify
@@ -157,7 +158,7 @@ init_mq_netlink (void)
if (netlink_socket == -1)
{
/* Just a normal netlink socket, not bound. */
- netlink_socket = socket (AF_NETLINK, SOCK_RAW, 0);
+ netlink_socket = socket (AF_NETLINK, SOCK_RAW, 0);
/* No need to do more if we have no socket. */
if (netlink_socket == -1)
return;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions
deleted file mode 100644
index 997784798..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/Versions
+++ /dev/null
@@ -1,5 +0,0 @@
-libpthread {
- GLIBC_2.3.4 {
- longjmp; siglongjmp;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
index c94ed0c38..c0b59c336 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
@@ -1,5 +1,5 @@
/* Machine-specific pthread type layouts. PowerPC version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -57,6 +57,7 @@ typedef union
long int __align;
} pthread_attr_t;
+
#if __WORDSIZE == 64
typedef struct __pthread_internal_list
{
@@ -70,6 +71,7 @@ typedef struct __pthread_internal_slist
} __pthread_slist_t;
#endif
+
/* Data structures for mutex handling. The structure of the attribute
type is deliberately not exposed. */
typedef union
@@ -88,7 +90,7 @@ typedef union
#if __WORDSIZE == 64
int __spins;
__pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV 1
+# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
@@ -158,9 +160,9 @@ typedef union
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
int __writer;
- int __pad1;
+ int __shared;
+ unsigned long int __pad1;
unsigned long int __pad2;
- unsigned long int __pad3;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
@@ -174,9 +176,12 @@ typedef union
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __shared;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
int __writer;
} __data;
# endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h
index 8123b418b..c7f121ba5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/bits/semaphore.h
@@ -33,9 +33,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index 1f2f481d6..66c02cbbd 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -24,7 +24,7 @@
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
-
+#include <kernel-features.h>
#ifndef __NR_futex
# define __NR_futex 221
@@ -33,167 +33,262 @@
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
+#define lll_futex_wait(futexp, val, private) \
+ lll_futex_timed_wait (futexp, val, NULL, private)
-#define lll_futex_wait(futexp, val) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
\
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), 0); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAIT, private), \
+ (val), (timespec)); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_wake(futexp, nr, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
\
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAIT, (val), (timespec)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAKE, private), \
+ (nr), 0); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
})
-#define lll_futex_wake(futexp, nr) \
+#define lll_robust_dead(futexv, private) \
+ do \
+ { \
+ INTERNAL_SYSCALL_DECL (__err); \
+ int *__futexp = &(futexv); \
+ \
+ atomic_or (__futexp, FUTEX_OWNER_DIED); \
+ INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
+ __lll_private_flag (FUTEX_WAKE, private), 1, 0); \
+ } \
+ while (0)
+
+/* Returns non-zero if error happened, zero if success. */
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
\
- __ret = INTERNAL_SYSCALL (futex, __err, 4, \
- (futexp), FUTEX_WAKE, (nr), 0); \
- INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+ (nr_wake), (nr_move), (mutex), (val)); \
+ INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
\
- __ret = INTERNAL_SYSCALL (futex, __err, 6, \
- (futexp), FUTEX_CMP_REQUEUE, (nr_wake), \
- (nr_move), (mutex), (val)); \
+ __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
+ (nr_wake), (nr_wake2), (futexp2), \
+ FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
+
#ifdef UP
# define __lll_acq_instr ""
# define __lll_rel_instr ""
#else
# define __lll_acq_instr "isync"
-# define __lll_rel_instr "sync"
+# ifdef _ARCH_PWR4
+/*
+ * Newer powerpc64 processors support the new "light weight" sync (lwsync).
+ * So if the build is using -mcpu=[power4,power5,power5+,970] we can
+ * safely use lwsync.
+ */
+# define __lll_rel_instr "lwsync"
+# else
+/*
+ * Older powerpc32 processors don't support the new "light weight"
+ * sync (lwsync). So the only safe option is to use normal sync
+ * for all powerpc32 applications.
+ */
+# define __lll_rel_instr "sync"
+# endif
#endif
-/* Set *futex to 1 if it is 0, atomically. Returns the old value */
-#define __lll_trylock(futex) \
+/* Set *futex to ID if it is 0, atomically. Returns the old value */
+#define __lll_robust_trylock(futex, id) \
({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
+ __asm __volatile ("1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
" cmpwi 0,%0,0\n" \
" bne 2f\n" \
" stwcx. %3,0,%2\n" \
" bne- 1b\n" \
"2: " __lll_acq_instr \
: "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (1), "m" (*futex) \
+ : "r" (futex), "r" (id), "m" (*futex) \
: "cr0", "memory"); \
__val; \
})
-#define lll_mutex_trylock(lock) __lll_trylock (&(lock))
+#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)
+
+/* Set *futex to 1 if it is 0, atomically. Returns the old value */
+#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)
+
+#define lll_trylock(lock) __lll_trylock (&(lock))
/* Set *futex to 2 if it is 0, atomically. Returns the old value */
-#define __lll_cond_trylock(futex) \
- ({ int __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
- " cmpwi 0,%0,0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b\n" \
- "2: " __lll_acq_instr \
- : "=&r" (__val), "=m" (*futex) \
- : "r" (futex), "r" (2), "m" (*futex) \
- : "cr0", "memory"); \
- __val; \
- })
-#define lll_mutex_cond_trylock(lock) __lll_cond_trylock (&(lock))
+#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)
+#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-#define lll_mutex_lock(lock) \
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
+
+#define lll_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ { \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__futex); \
+ else \
+ __lll_lock_wait (__futex, private); \
+ } \
+ })
+
+#define lll_robust_lock(lock, id, private) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
})
-#define lll_mutex_cond_lock(lock) \
+#define lll_cond_lock(lock, private) \
(void) ({ \
int *__futex = &(lock); \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0),\
0) != 0) \
- __lll_lock_wait (__futex); \
+ __lll_lock_wait (__futex, private); \
+ })
+
+#define lll_robust_cond_lock(lock, id, private) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ int __id = id | FUTEX_WAITERS; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id,\
+ 0), 0)) \
+ __val = __lll_robust_lock_wait (__futex, private); \
+ __val; \
})
+
extern int __lll_timedlock_wait
- (int *futex, const struct timespec *) attribute_hidden;
+ (int *futex, const struct timespec *, int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait
+ (int *futex, const struct timespec *, int private) attribute_hidden;
-#define lll_mutex_timedlock(lock, abstime) \
+#define lll_timedlock(lock, abstime, private) \
({ \
int *__futex = &(lock); \
int __val = 0; \
if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0),\
0) != 0) \
- __val = __lll_timedlock_wait (__futex, abstime); \
+ __val = __lll_timedlock_wait (__futex, abstime, private); \
+ __val; \
+ })
+
+#define lll_robust_timedlock(lock, abstime, id, private) \
+ ({ \
+ int *__futex = &(lock); \
+ int __val = 0; \
+ if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
+ 0), 0)) \
+ __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
__val; \
})
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_unlock_force(lock) \
+#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
- *__futex = 0; \
- __asm __volatile (__lll_rel_instr ::: "memory"); \
- lll_futex_wake (__futex, 1); \
+ int __val = atomic_exchange_rel (__futex, 0); \
+ if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-/* Our internal lock implementation is identical to the binary-compatible
- mutex implementation. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
/* The states of a lock are:
0 - untaken
1 - taken by one user
>1 - taken by more users */
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -202,7 +297,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
do { \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid); \
+ lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *, const struct timespec *)
@@ -216,26 +311,4 @@ extern int __lll_timedwait_tid (int *, const struct timespec *)
__res; \
})
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond)
- attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond)
- attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond)
- attribute_hidden;
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
#endif /* lowlevellock.h */
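Both the MIPS and PowerPC headers implement the same three-state protocol described in the "states of a lock" comment (0 untaken, 1 taken, >1 taken with possible waiters). A self-contained sketch of that protocol using GCC atomic builtins and a raw futex syscall, rather than the arch-specific atomic.h/INTERNAL_SYSCALL machinery used above:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long
futex (int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* 0 = unlocked, 1 = locked without waiters, 2 = locked, waiters possible.  */
static void
lock (int *futexp)
{
  int zero = 0;
  /* Fast path: 0 -> 1, like lll_lock's compare-and-exchange.  */
  if (__atomic_compare_exchange_n (futexp, &zero, 1, 0,
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;
  /* Slow path: mark the lock contended and sleep until it is released.  */
  while (__atomic_exchange_n (futexp, 2, __ATOMIC_ACQUIRE) != 0)
    futex (futexp, FUTEX_WAIT_PRIVATE, 2);
}

static void
unlock (int *futexp)
{
  /* Release, and wake one waiter only if the old value says there may be one.  */
  if (__atomic_exchange_n (futexp, 0, __ATOMIC_RELEASE) > 1)
    futex (futexp, FUTEX_WAKE_PRIVATE, 1);
}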
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S
index e19579e84..675a997e9 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S
@@ -1,3 +1,9 @@
-#define RESET_PID
-#include <tcb-offsets.h>
-#include <sysdeps/unix/sysv/linux/powerpc/powerpc32/clone.S>
+/* We want an #include_next, but we are the main source file.
+ So, #include ourselves and in that incarnation we can use #include_next. */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <clone.S>
+#else
+# define RESET_PID
+# include_next <clone.S>
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
index 0e6225624..88b24e7d9 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/sysdep-cancel.h
@@ -1,5 +1,5 @@
/* Cancellable system call stubs. Linux/PowerPC version.
- Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>, 2003.
@@ -15,8 +15,8 @@
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
#include <sysdep.h>
#include <tls.h>
@@ -30,7 +30,6 @@
# define PSEUDO(name, syscall_name, args) \
.section ".text"; \
ENTRY (name) \
- cfi_startproc; \
SINGLE_THREAD_P; \
bne- .Lpseudo_cancel; \
.type __##syscall_name##_nocancel,@function; \
@@ -45,7 +44,6 @@
mflr 9; \
stw 9,52(1); \
cfi_offset (lr, 4); \
- CGOTSETUP; \
DOCARGS_##args; /* save syscall args around CENABLE. */ \
CENABLE; \
stw 3,16(1); /* store CENABLE return value (MASK). */ \
@@ -59,11 +57,9 @@
lwz 4,52(1); \
lwz 0,12(1); /* restore CR/R3. */ \
lwz 3,8(1); \
- CGOTRESTORE; \
mtlr 4; \
mtcr 0; \
- addi 1,1,48; \
- cfi_endproc;
+ addi 1,1,48;
# define DOCARGS_0
# define UNDOCARGS_0
@@ -86,9 +82,6 @@
# define DOCARGS_6 stw 8,40(1); DOCARGS_5
# define UNDOCARGS_6 lwz 8,40(1); UNDOCARGS_5
-# define CGOTSETUP
-# define CGOTRESTORE
-
# ifdef IS_IN_libpthread
# define CENABLE bl __pthread_enable_asynccancel@local
# define CDISABLE bl __pthread_disable_asynccancel@local
@@ -96,20 +89,8 @@
# define CENABLE bl __libc_enable_asynccancel@local
# define CDISABLE bl __libc_disable_asynccancel@local
# elif defined IS_IN_librt
-# define CENABLE bl JUMPTARGET(__librt_enable_asynccancel)
-# define CDISABLE bl JUMPTARGET(__librt_disable_asynccancel)
-# if defined HAVE_AS_REL16 && defined __PIC__
-# undef CGOTSETUP
-# define CGOTSETUP \
- bcl 20,31,1f; \
- 1: stw 30,44(1); \
- mflr 30; \
- addis 30,30,_GLOBAL_OFFSET_TABLE-1b@ha; \
- addi 30,30,_GLOBAL_OFFSET_TABLE-1b@l
-# undef CGOTRESTORE
-# define CGOTRESTORE \
- lwz 30,44(1)
-# endif
+# define CENABLE bl __librt_enable_asynccancel@local
+# define CDISABLE bl __librt_disable_asynccancel@local
# else
# error Unsupported library
# endif
@@ -130,3 +111,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S
index b7e2cf6e7..eed2a8f1a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc32/vfork.S
@@ -53,5 +53,5 @@ ENTRY (__vfork)
PSEUDO_RET
PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions
deleted file mode 100644
index 3b111ddb5..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
- GLIBC_2.3.3 {
- # Changed timer_t.
- timer_create; timer_delete; timer_getoverrun; timer_gettime;
- timer_settime;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S
index f87adf473..675a997e9 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S
@@ -1,3 +1,9 @@
-#define RESET_PID
-#include <tcb-offsets.h>
-#include <sysdeps/unix/sysv/linux/powerpc/powerpc64/clone.S>
+/* We want an #include_next, but we are the main source file.
+ So, #include ourselves and in that incarnation we can use #include_next. */
+#ifndef INCLUDED_SELF
+# define INCLUDED_SELF
+# include <clone.S>
+#else
+# define RESET_PID
+# include_next <clone.S>
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h
index 226aaafdc..707765ab5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/sysdep-cancel.h
@@ -1,5 +1,5 @@
/* Cancellable system call stubs. Linux/PowerPC64 version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Franz Sirl <Franz.Sirl-kernel@lauterbach.com>, 2003.
@@ -15,8 +15,8 @@
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
#include <sysdep.h>
#include <tls.h>
@@ -36,7 +36,6 @@
# define PSEUDO(name, syscall_name, args) \
.section ".text"; \
ENTRY (name) \
- cfi_startproc; \
SINGLE_THREAD_P; \
bne- .Lpseudo_cancel; \
.type DASHDASHPFX(syscall_name##_nocancel),@function; \
@@ -66,8 +65,7 @@
ld 3,64(1); \
mtlr 9; \
mtcr 0; \
- addi 1,1,128; \
- cfi_endproc;
+ addi 1,1,128;
# define DOCARGS_0
# define UNDOCARGS_0
@@ -119,3 +117,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S
index 018132136..26885bb95 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/powerpc64/vfork.S
@@ -51,5 +51,5 @@ ENTRY (__vfork)
PSEUDO_RET
PSEUDO_END (__vfork)
-hidden_def (__vfork)
+libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
index e1afff8a3..969078094 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
pthread_once_t *once_control = (pthread_once_t *) arg;
*once_control = 0;
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
@@ -74,7 +74,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
break;
/* Same generation, some other thread was faster. Wait. */
- lll_futex_wait (once_control, oldval);
+ lll_futex_wait (once_control, oldval, LLL_PRIVATE);
}
@@ -92,7 +92,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
atomic_increment (once_control);
/* Wake up all other threads. */
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
return 0;
}
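The LLL_PRIVATE wake-ups above only change how waiters are woken; the user-visible contract of pthread_once is unchanged. A minimal usage sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init_once (void)
{
  /* Runs exactly once, no matter how many threads reach do_init.  */
  puts ("initialising");
}

static void
do_init (void)
{
  pthread_once (&once, init_once);
}

int
main (void)
{
  do_init ();
  do_init ();   /* returns immediately; init_once is not run again */
  return 0;
}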
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
new file mode 100644
index 000000000..90f2dc67c
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
@@ -0,0 +1,29 @@
+/* pthread_spin_unlock -- unlock a spin lock. PowerPC version.
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ __asm __volatile (__lll_rel_instr ::: "memory");
+ *lock = 0;
+ return 0;
+}
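The new PowerPC pthread_spin_unlock is just a release barrier followed by a plain store of 0; callers use it through the usual spin-lock API. A short usage sketch:

#include <pthread.h>

static pthread_spinlock_t lock;
static long counter;

static void
bump (void)
{
  pthread_spin_lock (&lock);
  ++counter;                      /* keep the critical section short */
  pthread_spin_unlock (&lock);    /* release barrier, then *lock = 0 */
}

int
main (void)
{
  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);
  bump ();
  pthread_spin_destroy (&lock);
  return 0;
}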
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
index 06b3bd0ab..0082c570a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -1,5 +1,5 @@
/* sem_post -- post to a POSIX semaphore. Powerpc version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -27,15 +27,20 @@
int
__new_sem_post (sem_t *sem)
{
- int *futex = (int *) sem;
+ struct new_sem *isem = (struct new_sem *) sem;
__asm __volatile (__lll_rel_instr ::: "memory");
- int nr = atomic_increment_val (futex);
- int err = lll_futex_wake (futex, nr);
- if (__builtin_expect (err, 0) < 0)
+ atomic_increment (&isem->value);
+ __asm __volatile (__lll_acq_instr ::: "memory");
+ if (isem->nwaiters > 0)
{
- __set_errno (-err);
- return -1;
+ int err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+ if (__builtin_expect (err, 0) < 0)
+ {
+ __set_errno (-err);
+ return -1;
+ }
}
return 0;
}
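The rewritten __new_sem_post increments the value and only issues a futex wake when isem->nwaiters indicates someone may be blocked, deriving the private flag from isem->private. The POSIX-level behaviour is unchanged; a minimal usage sketch:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t sem;

static void *
worker (void *arg)
{
  (void) arg;
  sem_wait (&sem);              /* blocks until main() posts */
  puts ("woken up");
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  sem_init (&sem, 0, 0);        /* pshared == 0: process-private semaphore */
  pthread_create (&thr, NULL, worker, NULL);
  sem_post (&sem);              /* wakes the waiter via FUTEX_WAKE */
  pthread_join (&thr, NULL);
  sem_destroy (&sem);
  return 0;
}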
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c
index 3e1b70f86..a1e228ee2 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pt-fork.c
@@ -19,7 +19,6 @@
#include <unistd.h>
-extern int __libc_fork (void);
pid_t
__fork (void)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c
index 7bee29784..d256ebcb0 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pt-raise.c
@@ -23,8 +23,10 @@
#include <tls.h>
#include <bits/kernel-features.h>
-extern __typeof(raise) __raise;
-int __raise (int sig)
+
+int
+raise (
+ int sig)
{
#if __ASSUME_TGKILL || defined __NR_tgkill
/* raise is an async-safe function. It could be called while the
@@ -48,6 +50,3 @@ int __raise (int sig)
return INLINE_SYSCALL (tkill, 2, THREAD_GETMEM (THREAD_SELF, tid), sig);
#endif
}
-libc_hidden_proto(raise)
-weak_alias(__raise, raise)
-libc_hidden_weak(raise)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
new file mode 100644
index 000000000..46fbd0de7
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread-pi-defines.sym
@@ -0,0 +1,8 @@
+#include <pthreadP.h>
+
+-- These PI macros are used by assembly code.
+
+MUTEX_KIND offsetof (pthread_mutex_t, __data.__kind)
+ROBUST_BIT PTHREAD_MUTEX_ROBUST_NORMAL_NP
+PI_BIT PTHREAD_MUTEX_PRIO_INHERIT_NP
+PS_BIT PTHREAD_MUTEX_PSHARED_BIT
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c
index aec1bc31c..b4fb1a2c3 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_getaffinity.c
@@ -38,8 +38,7 @@ __pthread_attr_getaffinity_np(const pthread_attr_t *attr, size_t cpusetsize,
{
/* Check whether there are any bits set beyond the limits
the user requested. */
- size_t cnt;
- for (cnt = cpusetsize; cnt < iattr->cpusetsize; ++cnt)
+ for (size_t cnt = cpusetsize; cnt < iattr->cpusetsize; ++cnt)
if (((char *) iattr->cpuset)[cnt] != 0)
return EINVAL;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c
index 580cf2c43..609ee2ad1 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_attr_setaffinity.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -26,7 +26,7 @@
/* Defined in pthread_setaffinity.c. */
-extern size_t __kernel_cpumask_size;
+extern size_t __kernel_cpumask_size attribute_hidden;
extern int __determine_cpumask_size (pid_t tid);
libpthread_hidden_proto(__determine_cpumask_size)
@@ -57,8 +57,7 @@ pthread_attr_setaffinity_np (pthread_attr_t *attr, size_t cpusetsize,
/* Check whether the new bitmask has any bit set beyond the
last one the kernel accepts. */
- size_t cnt;
- for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
+ for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
if (((char *) cpuset)[cnt] != '\0')
/* Found a nonzero byte. This means the user request cannot be
fulfilled. */
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c
index 189af774e..affcc6a65 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getaffinity.c
@@ -43,3 +43,4 @@ __pthread_getaffinity_np (pthread_t th, size_t cpusetsize, cpu_set_t *cpuset)
return 0;
}
strong_alias(__pthread_getaffinity_np, pthread_getaffinity_np)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c
index 155d3645f..9e28f69fd 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_getcpuclockid.c
@@ -1,5 +1,4 @@
-/* pthread_getcpuclockid -- Get POSIX clockid_t for a pthread_t. Linux version
- Copyright (C) 2000,2001,2002,2003,2004,2005 Free Software Foundation, Inc.
+/* Copyright (C) 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -21,17 +20,8 @@
#include <pthreadP.h>
#include <sys/time.h>
#include <tls.h>
-#include <bits/kernel-features.h>
-#include <kernel-posix-cpu-timers.h>
-#if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-int __libc_missing_posix_cpu_timers attribute_hidden;
-#endif
-#if !(__ASSUME_POSIX_TIMERS > 0)
-int __libc_missing_posix_timers attribute_hidden;
-#endif
-
int
pthread_getcpuclockid (
pthread_t threadid,
@@ -44,50 +34,6 @@ pthread_getcpuclockid (
/* Not a valid thread handle. */
return ESRCH;
-#ifdef __NR_clock_getres
- /* The clockid_t value is a simple computation from the TID.
- But we do a clock_getres call to validate it if we aren't
- yet sure we have the kernel support. */
-
- const clockid_t tidclock = MAKE_THREAD_CPUCLOCK (pd->tid, CPUCLOCK_SCHED);
-
-# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-# if !(__ASSUME_POSIX_TIMERS > 0)
- if (__libc_missing_posix_timers && !__libc_missing_posix_cpu_timers)
- __libc_missing_posix_cpu_timers = 1;
-# endif
- if (!__libc_missing_posix_cpu_timers)
- {
- INTERNAL_SYSCALL_DECL (err);
- int r = INTERNAL_SYSCALL (clock_getres, err, 2, tidclock, NULL);
- if (!INTERNAL_SYSCALL_ERROR_P (r, err))
-# endif
- {
- *clockid = tidclock;
- return 0;
- }
-
-# if !(__ASSUME_POSIX_CPU_TIMERS > 0)
-# if !(__ASSUME_POSIX_TIMERS > 0)
- if (INTERNAL_SYSCALL_ERRNO (r, err) == ENOSYS)
- {
- /* The kernel doesn't support these calls at all. */
- __libc_missing_posix_timers = 1;
- __libc_missing_posix_cpu_timers = 1;
- }
- else
-# endif
- if (INTERNAL_SYSCALL_ERRNO (r, err) == EINVAL)
- {
- /* The kernel doesn't support these clocks at all. */
- __libc_missing_posix_cpu_timers = 1;
- }
- else
- return INTERNAL_SYSCALL_ERRNO (r, err);
- }
-# endif
-#endif
-
#ifdef CLOCK_THREAD_CPUTIME_ID
/* We need to store the thread ID in the CLOCKID variable together
with a number identifying the clock. We reserve the low 3 bits
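With the runtime probing removed, pthread_getcpuclockid simply encodes the thread ID into a clockid_t; callers pass the result to clock_gettime as before. A minimal usage sketch:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

int
main (void)
{
  clockid_t cid;
  struct timespec ts;

  if (pthread_getcpuclockid (pthread_self (), &cid) != 0)
    return 1;

  /* Read the calling thread's CPU-time clock.  */
  if (clock_gettime (cid, &ts) == 0)
    printf ("thread CPU time: %ld.%09ld s\n", (long) ts.tv_sec, ts.tv_nsec);
  return 0;
}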
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
index 8d887e020..3a70c3764 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_kill.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -33,7 +33,15 @@ __pthread_kill (
struct pthread *pd = (struct pthread *) threadid;
/* Make sure the descriptor is valid. */
- if (INVALID_TD_P (pd))
+ if (DEBUGGING_P && INVALID_TD_P (pd))
+ /* Not a valid thread handle. */
+ return ESRCH;
+
+ /* Force load of pd->tid into local variable or register. Otherwise
+ if a thread exits between ESRCH test and tgkill, we might return
+ EINVAL, because pd->tid would be cleared by the kernel. */
+ pid_t tid = atomic_forced_read (pd->tid);
+ if (__builtin_expect (tid <= 0, 0))
/* Not a valid thread handle. */
return ESRCH;
@@ -53,15 +61,15 @@ __pthread_kill (
int val;
#if __ASSUME_TGKILL
val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
- pd->tid, signo);
+ tid, signo);
#else
# ifdef __NR_tgkill
val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
- pd->tid, signo);
+ tid, signo);
if (INTERNAL_SYSCALL_ERROR_P (val, err)
&& INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
- val = INTERNAL_SYSCALL (tkill, err, 2, pd->tid, signo);
+ val = INTERNAL_SYSCALL (tkill, err, 2, tid, signo);
#endif
return (INTERNAL_SYSCALL_ERROR_P (val, err)
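The key change above is reading pd->tid exactly once into a local, so the ESRCH check and the tgkill/tkill call cannot observe different values if the thread exits in between. In glibc-style atomic.h, atomic_forced_read is typically a compiler barrier that pins the loaded value in a register; a hedged sketch of the idea (the real macro lives in the arch's atomic.h):

/* Illustrative forced-read helper: the empty asm makes the compiler load
   the value once and keep using that copy instead of re-reading pd->tid.  */
#define forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })

/* Usage pattern from __pthread_kill above:
     pid_t tid = forced_read (pd->tid);
     if (tid <= 0)
       return ESRCH;
     ... pass tid, never pd->tid, to tgkill/tkill ...  */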
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
index 047c64322..804bfab44 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
@@ -1,8 +1,14 @@
#include <pthreadP.h>
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock(mutex)
+#define LLL_MUTEX_LOCK(mutex) \
+ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
+#define LLL_MUTEX_TRYLOCK(mutex) \
+ lll_cond_trylock ((mutex)->__data.__lock)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
+ lll_robust_cond_lock ((mutex)->__data.__lock, id, \
+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#define __pthread_mutex_lock __pthread_mutex_cond_lock
+#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full
#define NO_INCR
#include <pthread_mutex_lock.c>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
index d28b6f2f3..467e8ec70 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_setaffinity.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -24,7 +24,7 @@
#include <sys/types.h>
-size_t __kernel_cpumask_size;
+size_t __kernel_cpumask_size attribute_hidden;
/* Determine the current affinity. As a side effect we learn
@@ -71,8 +71,7 @@ pthread_setaffinity_np (pthread_t th, size_t cpusetsize,
/* We now know the size of the kernel cpumask_t. Make sure the user
does not request to set a bit beyond that. */
- size_t cnt;
- for (cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
+ for (size_t cnt = __kernel_cpumask_size; cnt < cpusetsize; ++cnt)
if (((char *) cpuset)[cnt] != '\0')
/* Found a nonzero byte. This means the user request cannot be
fulfilled. */
@@ -80,6 +79,12 @@ pthread_setaffinity_np (pthread_t th, size_t cpusetsize,
res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, cpusetsize,
cpuset);
+
+#ifdef RESET_VGETCPU_CACHE
+ if (!INTERNAL_SYSCALL_ERROR_P (res, err))
+ RESET_VGETCPU_CACHE ();
+#endif
+
return (INTERNAL_SYSCALL_ERROR_P (res, err)
? INTERNAL_SYSCALL_ERRNO (res, err)
: 0);
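The loop above rejects any CPU bit the kernel cannot represent. A minimal sketch of that tail check, independent of the syscall plumbing:

#include <errno.h>
#include <stddef.h>

static int
check_cpuset_tail (const char *cpuset, size_t cpusetsize,
                   size_t kernel_cpumask_size)
{
  for (size_t cnt = kernel_cpumask_size; cnt < cpusetsize; ++cnt)
    if (cpuset[cnt] != '\0')
      return EINVAL;   /* a bit is set beyond what the kernel supports */
  return 0;
}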
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
new file mode 100644
index 000000000..9e49085ce
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/pthread_sigqueue.c
@@ -0,0 +1,83 @@
+/* Copyright (C) 2009 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthreadP.h>
+#include <tls.h>
+#include <sysdep.h>
+#include <bits/kernel-features.h>
+
+
+int
+pthread_sigqueue (
+ pthread_t threadid,
+ int signo,
+ const union sigval value)
+{
+#ifdef __NR_rt_tgsigqueueinfo
+ struct pthread *pd = (struct pthread *) threadid;
+
+ /* Make sure the descriptor is valid. */
+ if (DEBUGGING_P && INVALID_TD_P (pd))
+ /* Not a valid thread handle. */
+ return ESRCH;
+
+ /* Force load of pd->tid into local variable or register. Otherwise
+ if a thread exits between ESRCH test and tgkill, we might return
+ EINVAL, because pd->tid would be cleared by the kernel. */
+ pid_t tid = atomic_forced_read (pd->tid);
+ if (__builtin_expect (tid <= 0, 0))
+ /* Not a valid thread handle. */
+ return ESRCH;
+
+ /* Disallow sending the signal we use for cancellation, timers, and
+ the setxid implementation. */
+ if (signo == SIGCANCEL || signo == SIGTIMER || signo == SIGSETXID)
+ return EINVAL;
+
+ /* Set up the siginfo_t structure. */
+ siginfo_t info;
+ memset (&info, '\0', sizeof (siginfo_t));
+ info.si_signo = signo;
+ info.si_code = SI_QUEUE;
+ info.si_pid = THREAD_GETMEM (THREAD_SELF, pid);
+ info.si_uid = getuid ();
+ info.si_value = value;
+
+ /* We have a special syscall to do the work. */
+ INTERNAL_SYSCALL_DECL (err);
+
+ /* One comment: The PID field in the TCB can temporarily be changed
+ (in fork). But this must not affect this code here. Since this
+ function would have to be called while the thread is executing
+ fork, it would have to happen in a signal handler. But this is
+ not allowed, pthread_sigqueue is not guaranteed to be async-safe. */
+ int val = INTERNAL_SYSCALL (rt_tgsigqueueinfo, err, 4,
+ THREAD_GETMEM (THREAD_SELF, pid),
+ tid, signo, &info);
+
+ return (INTERNAL_SYSCALL_ERROR_P (val, err)
+ ? INTERNAL_SYSCALL_ERRNO (val, err) : 0);
+#else
+ return ENOSYS;
+#endif
+}
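A hedged usage sketch for the new pthread_sigqueue (a GNU extension): queue SIGUSR1 with an integer payload to another thread, which retrieves it with sigwaitinfo. Error handling is omitted for brevity:

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  (void) arg;
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);
  siginfo_t info;
  sigwaitinfo (&set, &info);                 /* signal plus queued payload */
  printf ("got value %d\n", info.si_value.sival_int);
  return NULL;
}

int
main (void)
{
  sigset_t set;
  sigemptyset (&set);
  sigaddset (&set, SIGUSR1);
  pthread_sigmask (SIG_BLOCK, &set, NULL);   /* mask inherited by worker */

  pthread_t th;
  pthread_create (&th, NULL, worker, NULL);

  union sigval v = { .sival_int = 42 };
  pthread_sigqueue (th, SIGUSR1, v);

  pthread_join (th, NULL);
  return 0;
}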
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/raise.c b/libpthread/nptl/sysdeps/unix/sysv/linux/raise.c
index de794e4ab..da35cfe9f 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/raise.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/raise.c
@@ -25,8 +25,9 @@
#include <bits/kernel-features.h>
-extern __typeof(raise) __raise;
-int __raise (int sig)
+int
+raise (
+ int sig)
{
struct pthread *pd = THREAD_SELF;
#if __ASSUME_TGKILL || defined __NR_tgkill
@@ -70,6 +71,4 @@ int __raise (int sig)
return INLINE_SYSCALL (tkill, 2, selftid, sig);
#endif
}
-libc_hidden_proto(raise)
-weak_alias(__raise, raise)
-libc_hidden_weak(raise)
+libc_hidden_def (raise)
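The rewritten raise sends the signal to the calling thread with tgkill, using the pid and tid cached in the TCB. A rough user-level equivalent (without the TCB caching), for illustration only:

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
raise_sketch (int sig)
{
  pid_t pid = getpid ();
  pid_t tid = (pid_t) syscall (SYS_gettid);
  return (int) syscall (SYS_tgkill, pid, tid, sig);
}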
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c
index f6c3de4bc..9e36858fc 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/register-atfork.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,11 +20,12 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
-#include "fork.h"
+#include <fork.h>
+#include <atomic.h>
/* Lock to protect allocation and deallocation of fork handlers. */
-lll_lock_t __fork_lock = LLL_LOCK_INITIALIZER;
+int __fork_lock = LLL_LOCK_INITIALIZER;
/* Number of pre-allocated handler entries. */
@@ -85,7 +86,7 @@ __register_atfork (
void *dso_handle)
{
/* Get the lock to not conflict with other allocations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
struct fork_handler *newp = fork_handler_alloc ();
@@ -97,12 +98,49 @@ __register_atfork (
newp->child_handler = child;
newp->dso_handle = dso_handle;
- newp->next = __fork_handlers;
- __fork_handlers = newp;
+ __linkin_atfork (newp);
}
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
return newp == NULL ? ENOMEM : 0;
}
+libc_hidden_def (__register_atfork)
+
+
+void
+attribute_hidden
+__linkin_atfork (struct fork_handler *newp)
+{
+ do
+ newp->next = __fork_handlers;
+ while (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
+ newp, newp->next) != 0);
+}
+
+
+libc_freeres_fn (free_mem)
+{
+ /* Get the lock to not conflict with running forks. */
+ lll_lock (__fork_lock, LLL_PRIVATE);
+
+ /* No more fork handlers. */
+ __fork_handlers = NULL;
+
+ /* Free any allocated memory blocks for the object pool. */
+ struct fork_handler_pool *runp = fork_handler_pool.next;
+
+ memset (&fork_handler_pool, '\0', sizeof (fork_handler_pool));
+
+ /* Release the lock. */
+ lll_unlock (__fork_lock, LLL_PRIVATE);
+
+ /* We can free the memory after releasing the lock. */
+ while (runp != NULL)
+ {
+ struct fork_handler_pool *oldp = runp;
+ runp = runp->next;
+ free (oldp);
+ }
+}
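__linkin_atfork pushes the new handler onto the list with a compare-and-swap loop so it stays safe against concurrent registrations. The same push expressed with C11 atomics (the NPTL code uses its catomic_* macros instead):

#include <stdatomic.h>

struct handler_sketch
{
  struct handler_sketch *next;
};

static _Atomic (struct handler_sketch *) handlers_head;

static void
linkin_sketch (struct handler_sketch *newp)
{
  struct handler_sketch *head = atomic_load (&handlers_head);
  do
    newp->next = head;                 /* head is refreshed on CAS failure */
  while (!atomic_compare_exchange_weak (&handlers_head, &head, newp));
}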
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c
index 7f0b67918..7ed0df89a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_post.c
@@ -1,5 +1,5 @@
/* sem_post -- post to a POSIX semaphore. Generic futex-using version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -27,14 +27,30 @@
int
__new_sem_post (sem_t *sem)
{
- int *futex = (int *) sem;
+ struct new_sem *isem = (struct new_sem *) sem;
- int nr = atomic_increment_val (futex);
- int err = lll_futex_wake (futex, nr);
- if (__builtin_expect (err, 0) < 0)
+ __typeof (isem->value) cur;
+ do
{
- __set_errno (-err);
- return -1;
+ cur = isem->value;
+ if (isem->value == SEM_VALUE_MAX)
+ {
+ __set_errno (EOVERFLOW);
+ return -1;
+ }
+ }
+ while (atomic_compare_and_exchange_bool_acq (&isem->value, cur + 1, cur));
+
+ atomic_full_barrier ();
+ if (isem->nwaiters > 0)
+ {
+ int err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+ if (__builtin_expect (err, 0) < 0)
+ {
+ __set_errno (-err);
+ return -1;
+ }
}
return 0;
}
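The new sem_post bumps the count with a CAS loop that refuses to overflow past SEM_VALUE_MAX and only issues a futex wake when nwaiters records a sleeper. A simplified sketch with C11 atomics, the futex call abstracted away:

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>

struct sem_sketch
{
  atomic_uint value;
  atomic_uint nwaiters;
};

static int
sem_post_sketch (struct sem_sketch *isem,
                 void (*futex_wake_one) (atomic_uint *))
{
  unsigned int cur = atomic_load (&isem->value);
  do
    {
      if (cur == INT_MAX)              /* stands in for SEM_VALUE_MAX */
        {
          errno = EOVERFLOW;
          return -1;
        }
    }
  while (!atomic_compare_exchange_weak (&isem->value, &cur, cur + 1));

  if (atomic_load (&isem->nwaiters) > 0)
    futex_wake_one (&isem->value);     /* FUTEX_WAKE, one waiter */
  return 0;
}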
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
index 79b11948c..3e5e6dcae 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
@@ -1,5 +1,5 @@
/* sem_timedwait -- wait on a semaphore. Generic futex-using version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -23,38 +23,40 @@
#include <lowlevellock.h>
#include <internaltypes.h>
#include <semaphore.h>
+
#include <pthreadP.h>
+extern void __sem_wait_cleanup (void *arg) attribute_hidden;
+
+
int
sem_timedwait (sem_t *sem, const struct timespec *abstime)
{
- /* First check for cancellation. */
- CANCELLATION_P (THREAD_SELF);
-
- int *futex = (int *) sem;
- int val;
+ struct new_sem *isem = (struct new_sem *) sem;
int err;
- if (*futex > 0)
+ if (atomic_decrement_if_positive (&isem->value) > 0)
+ return 0;
+
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
{
- val = atomic_decrement_if_positive (futex);
- if (val > 0)
- return 0;
+ __set_errno (EINVAL);
+ return -1;
}
- err = -EINVAL;
- if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
- goto error_return;
+ atomic_increment (&isem->nwaiters);
+
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
- do
+ while (1)
{
struct timeval tv;
struct timespec rt;
int sec, nsec;
/* Get the current time. */
- gettimeofday (&tv, NULL);
+ __gettimeofday (&tv, NULL);
/* Compute relative timeout. */
sec = abstime->tv_sec - tv.tv_sec;
@@ -68,7 +70,11 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
/* Already timed out? */
err = -ETIMEDOUT;
if (sec < 0)
- goto error_return;
+ {
+ __set_errno (ETIMEDOUT);
+ err = -1;
+ break;
+ }
/* Do wait. */
rt.tv_sec = sec;
@@ -77,21 +83,29 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
/* Enable asynchronous cancellation. Required by the standard. */
int oldtype = __pthread_enable_asynccancel ();
- err = lll_futex_timed_wait (futex, 0, &rt);
+ err = lll_futex_timed_wait (&isem->value, 0, &rt,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
if (err != 0 && err != -EWOULDBLOCK)
- goto error_return;
+ {
+ __set_errno (-err);
+ err = -1;
+ break;
+ }
- val = atomic_decrement_if_positive (futex);
+ if (atomic_decrement_if_positive (&isem->value) > 0)
+ {
+ err = 0;
+ break;
+ }
}
- while (val <= 0);
- return 0;
+ pthread_cleanup_pop (0);
+
+ atomic_decrement (&isem->nwaiters);
- error_return:
- __set_errno (-err);
- return -1;
+ return err;
}
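The wait loop converts the caller's absolute deadline into a relative timeout for the futex call on every iteration. The arithmetic in isolation, assuming a CLOCK_REALTIME deadline:

#include <stdbool.h>
#include <sys/time.h>
#include <time.h>

static bool
abs_to_rel_timeout (const struct timespec *abstime, struct timespec *rt)
{
  struct timeval tv;
  gettimeofday (&tv, NULL);

  long sec = abstime->tv_sec - tv.tv_sec;
  long nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (nsec < 0)
    {
      nsec += 1000000000;              /* borrow a second */
      --sec;
    }
  if (sec < 0)
    return false;                      /* deadline already passed: ETIMEDOUT */

  rt->tv_sec = sec;
  rt->tv_nsec = nsec;
  return true;
}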
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c
index 416d8634b..e661e09c8 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sem_wait.c
@@ -1,5 +1,5 @@
/* sem_wait -- wait on a semaphore. Generic futex-using version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
@@ -23,34 +23,62 @@
#include <lowlevellock.h>
#include <internaltypes.h>
#include <semaphore.h>
+
#include <pthreadP.h>
+void
+attribute_hidden
+__sem_wait_cleanup (void *arg)
+{
+ struct new_sem *isem = (struct new_sem *) arg;
+
+ atomic_decrement (&isem->nwaiters);
+}
+
+
int
__new_sem_wait (sem_t *sem)
{
- /* First check for cancellation. */
- CANCELLATION_P (THREAD_SELF);
-
- int *futex = (int *) sem;
+ struct new_sem *isem = (struct new_sem *) sem;
int err;
- do
- {
- if (atomic_decrement_if_positive (futex) > 0)
- return 0;
+ if (atomic_decrement_if_positive (&isem->value) > 0)
+ return 0;
+ atomic_increment (&isem->nwaiters);
+
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+ while (1)
+ {
/* Enable asynchronous cancellation. Required by the standard. */
int oldtype = __pthread_enable_asynccancel ();
- err = lll_futex_wait (futex, 0);
+ err = lll_futex_wait (&isem->value, 0,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
/* Disable asynchronous cancellation. */
__pthread_disable_asynccancel (oldtype);
+
+ if (err != 0 && err != -EWOULDBLOCK)
+ {
+ __set_errno (-err);
+ err = -1;
+ break;
+ }
+
+ if (atomic_decrement_if_positive (&isem->value) > 0)
+ {
+ err = 0;
+ break;
+ }
}
- while (err == 0 || err == -EWOULDBLOCK);
- __set_errno (-err);
- return -1;
+ pthread_cleanup_pop (0);
+
+ atomic_decrement (&isem->nwaiters);
+
+ return err;
}
weak_alias(__new_sem_wait, sem_wait)
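Both semaphore wait paths now count themselves in nwaiters and rely on a cleanup handler to undo the count if the thread is cancelled inside the futex wait. A minimal sketch of that push/pop pattern:

#include <pthread.h>
#include <stdatomic.h>

static atomic_uint nwaiters_sketch;

static void
wait_cleanup_sketch (void *arg)
{
  (void) arg;
  atomic_fetch_sub (&nwaiters_sketch, 1);   /* cancelled: still leave */
}

static void
wait_sketch (void (*blocking_wait) (void))
{
  atomic_fetch_add (&nwaiters_sketch, 1);
  pthread_cleanup_push (wait_cleanup_sketch, NULL);
  blocking_wait ();                         /* cancellation point */
  pthread_cleanup_pop (0);                  /* 0: do not run the handler */
  atomic_fetch_sub (&nwaiters_sketch, 1);   /* normal exit path */
}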
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch
index 9bd7569c9..940bd62f8 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/Makefile.arch
@@ -7,8 +7,8 @@
libpthread_SSRC = pt-vfork.S pthread_once.S pthread_rwlock_wrlock.S \
pthread_rwlock_rdlock.S pthread_rwlock_unlock.S \
- lowlevellock.S pthread_barrier_wait.S pthread_cond_broadcast.S \
- pthread_cond_signal.S \
+ lowlevellock.S lowlevelrobustlock.S pthread_barrier_wait.S \
+ pthread_cond_broadcast.S pthread_cond_signal.S \
pthread_rwlock_timedwrlock.S pthread_rwlock_timedrdlock.S \
sem_post.S sem_timedwait.S sem_trywait.S sem_wait.S
@@ -43,6 +43,7 @@ ASFLAGS-sem_wait.S = -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-libc-lowlevellock.S = -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-lowlevellock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
+ASFLAGS-lowlevelrobustlock.S = -DNOT_IN_libc=1 -DIS_IN_libpthread=1 -D_LIBC_REENTRANT -DUSE___THREAD
ASFLAGS-clone.S = -D_LIBC_REENTRANT
ASFLAGS-vfork.S = -D_LIBC_REENTRANT
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
index 969686dd5..badcda570 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,6 +21,8 @@
#ifndef _BITS_PTHREADTYPES_H
#define _BITS_PTHREADTYPES_H 1
+#include <endian.h>
+
#define __SIZEOF_PTHREAD_ATTR_T 36
#define __SIZEOF_PTHREAD_MUTEX_T 24
#define __SIZEOF_PTHREAD_MUTEXATTR_T 4
@@ -127,9 +130,21 @@ typedef union
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __shared;
+ /* FLAGS must stay at this position in the structure to maintain
+ binary compatibility. */
+ unsigned char __flags;
+#else
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
+ unsigned char __shared;
+ unsigned char __pad1;
+ unsigned char __pad2;
+#endif
pthread_t __writer;
} __data;
char __size[__SIZEOF_PTHREAD_RWLOCK_T];
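Splitting the old unsigned int __flags into four bytes must keep the flags byte on top of the int's least-significant byte, which is why the field order flips with endianness. A small self-contained check of that invariant (field names simplified):

#include <assert.h>
#include <endian.h>

union layout_check
{
  struct { unsigned int flags; } old_layout;
  struct
  {
#if __BYTE_ORDER == __BIG_ENDIAN
    unsigned char pad1, pad2, shared, flags;
#else
    unsigned char flags, shared, pad1, pad2;
#endif
  } new_layout;
};

int
main (void)
{
  union layout_check u = { .old_layout = { .flags = 0x5a } };
  assert (u.new_layout.flags == 0x5a);   /* same byte on either endianness */
  return 0;
}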
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h
index e6c5d845c..934493c30 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/bits/semaphore.h
@@ -28,9 +28,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c
index dcedd2625..6868b9bcd 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/fork.c
@@ -18,7 +18,6 @@
#include <sched.h>
#include <signal.h>
-#include <stdio.h>
#include <sysdep.h>
#include <tls.h>
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
index 94a24b46e..feb82110c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/libc-lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
index 062ce2871..c7028360f 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevel-atomic.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -44,16 +44,16 @@
mov.l reg, mem; \
99: mov r1, r15
-#define XADD(reg, mem, new, old) \
+#define XADD(reg, mem, old, tmp) \
.align 2; \
mova 99f, r0; \
nop; \
mov r15, r1; \
- mov _IMM4, r15; \
+ mov _IMM8, r15; \
98: mov.l mem, old; \
- mov old, new; \
- add reg, new; \
- mov.l new, mem; \
+ mov reg, tmp; \
+ add old, tmp; \
+ mov.l tmp, mem; \
99: mov r1, r15
#define XCHG(reg, mem, old) \
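The corrected XADD leaves the pre-increment value in `old' and uses a scratch register for the sum, i.e. it is a fetch-and-add. The same operation in C, for reference:

#include <stdatomic.h>

static inline int
xadd_sketch (atomic_int *mem, int addend)
{
  return atomic_fetch_add (mem, addend);   /* returns the old value; *mem += addend */
}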
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S
index a5c916bb7..6d4036496 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.S
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,73 +19,320 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
.text
-#define SYS_gettimeofday __NR_gettimeofday
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg; \
+ extu.b reg, reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+ mov #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg; \
+ extu.b reg, reg
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ mov #(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+ swap.b tmp2, tmp2; \
+ or tmp2, tmp; \
+ xor tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+ mov #(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ bra 98f ; \
+ mov.l @tmp, reg ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98:
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ mov.l @tmp, reg ; \
+ bra 98f ; \
+ mov #FUTEX_WAIT, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: or tmp, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, reg ; \
+ add reg, tmp ; \
+ mov.l @tmp, reg ; \
+ bra 98f ; \
+ mov #FUTEX_WAKE, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: or tmp, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg
+# else
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAIT, tmp ; \
+ or tmp, reg
+# endif
+# define LOAD_FUTEX_WAIT_ABS(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAIT_BITSET, tmp ; \
+ mov #(FUTEX_CLOCK_REALTIME >> 8), tmp2; \
+ swap.b tmp2, tmp2; \
+ or tmp2, tmp; \
+ or tmp, reg
+# define LOAD_FUTEX_WAKE(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAKE, tmp ; \
+ or tmp, reg
+#endif
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 5
- /* void __lll_mutex_lock_wait (int val, int *__futex) */
-__lll_mutex_lock_wait:
- mov #2, r6
- cmp/eq r4, r6
- mov r5, r4
+ cfi_startproc
+__lll_lock_wait_private:
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r4, r6
+ mov r5, r8
mov #0, r7 /* No timeout. */
- mov #FUTEX_WAIT, r5
+ LOAD_PRIVATE_FUTEX_WAIT (r5, r0, r1)
+ mov #2, r4
+ cmp/eq r4, r6
bf 2f
1:
+ mov r8, r4
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
2:
- XCHG (r6, @r4, r2)
+ mov #2, r6
+ XCHG (r6, @r8, r2)
tst r2, r2
bf 1b
+ mov.l @r15+, r8
rts
- nop
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ mov r2, r0
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
.align 5
- /* int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime) */
-__lll_mutex_timedlock_wait:
+ cfi_startproc
+__lll_lock_wait:
+ mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r6, r9
+ mov r4, r6
+ mov r5, r8
+ mov #0, r7 /* No timeout. */
+ mov r9, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
+
+ mov #2, r4
+ cmp/eq r4, r6
+ bf 2f
+
+1:
+ mov r8, r4
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+
+2:
+ mov #2, r6
+ XCHG (r6, @r8, r2)
+ tst r2, r2
+ bf 1b
+
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ ret
+ mov r2, r0
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* r5 (r8): futex
+ r7 (r11): flags
+ r6 (r9): timeout
+ r4 (r10): futex value
+ */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
+ .align 5
+ cfi_startproc
+__lll_timedlock_wait:
+ mov.l r12, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r12, 0)
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ mov.l .Lhave, r1
+# ifdef PIC
+ mova .Lgot, r0
+ mov.l .Lgot, r12
+ add r0, r12
+ add r12, r1
+# endif
+ mov.l @r1, r0
+ tst r0, r0
+ bt .Lreltmo
+# endif
+
+ mov r4, r2
+ mov r5, r4
+ mov r7, r5
+ mov r6, r7
+ LOAD_FUTEX_WAIT_ABS (r5, r0, r1)
+
+ mov #2, r6
+ cmp/eq r6, r2
+ bf/s 2f
+ mov r6, r2
+
+1:
+ mov #2, r6
+ mov #-1, r1
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x16
+ SYSCALL_INST_PAD
+ mov r0, r6
+
+2:
+ XCHG (r2, @r4, r3) /* NB: lock is implied */
+
+ tst r3, r3
+ bt/s 3f
+ mov r6, r0
+
+ cmp/eq #-ETIMEDOUT, r0
+ bt 4f
+ cmp/eq #-EINVAL, r0
+ bf 1b
+4:
+ neg r0, r3
+3:
+ mov r3, r0
+ rts
+ mov.l @r15+, r12
+
+ .align 2
+# ifdef PIC
+.Lgot:
+ .long _GLOBAL_OFFSET_TABLE_
+.Lhave:
+ .long __have_futex_clock_realtime@GOTOFF
+# else
+.Lhave:
+ .long __have_futex_clock_realtime
+# endif
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
mov.l @(4,r6), r1
mov.l .L1g, r0
cmp/hs r0, r1
bt 3f
+ mov.l r11, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r11, 0)
+ mov.l r10, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r10, 0)
mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r7, r11
+ mov r4, r10
mov r6, r9
mov r5, r8
/* Stack frame for the timespec and timeval structs. */
add #-8, r15
+ cfi_adjust_cfa_offset(8)
+
+ mov #2, r2
+ XCHG (r2, @r8, r3)
+
+ tst r3, r3
+ bt 6f
1:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -105,56 +353,49 @@ __lll_mutex_timedlock_wait:
add #-1, r2
4:
cmp/pz r2
- bf 5f /* Time is already up. */
+ bf 2f /* Time is already up. */
mov.l r2, @r15 /* Store relative timeout. */
mov.l r3, @(4,r15)
- mov #1, r3
- mov #2, r6
- CMPXCHG (r3, @r8, r6, r2)
- tst r2, r2
- bt 8f
-
mov r8, r4
- mov #FUTEX_WAIT, r5
+ mov r11, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
+ mov r10, r6
mov r15, r7
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
- mov r0, r4
+ mov r0, r5
-8:
- mov #0, r3
- CMPXCHG (r3, @r8, r6, r2)
- bf/s 7f
- mov #0, r0
+ mov #2, r2
+ XCHG (r2, @r8, r3)
+
+ tst r3, r3
+ bt/s 6f
+ mov #-ETIMEDOUT, r1
+ cmp/eq r5, r1
+ bf 1b
+
+2: mov #ETIMEDOUT, r3
6:
+ mov r3, r0
add #8, r15
mov.l @r15+, r8
- rts
mov.l @r15+, r9
-7:
- /* Check whether the time expired. */
- mov #-ETIMEDOUT, r1
- cmp/eq r5, r1
- bt 5f
+ mov.l @r15+, r10
+ mov.l @r15+, r11
+ rts
+ mov.l @r15+, r12
- /* Make sure the current holder knows we are going to sleep. */
- XCHG (r2, @r8, r3)
- tst r3, r3
- bt/s 6b
- mov #0, r0
- bra 1b
- nop
3:
+ mov.l @r15+, r12
rts
- mov #EINVAL, r0
-5:
- bra 6b
- mov #ETIMEDOUT, r0
+ mov #EINVAL, r0
+# endif
+ cfi_endproc
.L1k:
.word 1000
@@ -162,21 +403,16 @@ __lll_mutex_timedlock_wait:
.L1g:
.long 1000000000
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-
-#ifdef NOT_IN_libc
- .globl lll_unlock_wake_cb
- .type lll_unlock_wake_cb,@function
- .hidden lll_unlock_wake_cb
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 5
-lll_unlock_wake_cb:
- DEC (@r4, r2)
- tst r2, r2
- bt 1f
-
- mov #FUTEX_WAKE, r5
+ cfi_startproc
+__lll_unlock_wake_private:
+ LOAD_PRIVATE_FUTEX_WAKE (r5, r0, r1)
mov #1, r6 /* Wake one thread. */
mov #0, r7
mov.l r7, @r4 /* Stores 0. */
@@ -184,21 +420,19 @@ lll_unlock_wake_cb:
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
-
-1:
rts
- nop
- .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+ nop
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 5
- /* void __lll_mutex_unlock_wake(int *__futex) */
-__lll_mutex_unlock_wake:
- mov #FUTEX_WAKE, r5
+ cfi_startproc
+__lll_unlock_wake:
+ LOAD_FUTEX_WAKE (r5, r0, r1)
mov #1, r6 /* Wake one thread. */
mov #0, r7
mov.l r7, @r4 /* Stores 0. */
@@ -207,29 +441,34 @@ __lll_mutex_unlock_wake:
trapa #0x14
SYSCALL_INST_PAD
rts
- nop
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
-
+ nop
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
.align 5
+ cfi_startproc
__lll_timedwait_tid:
mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
mov r4, r8
mov r5, r9
/* Stack frame for the timespec and timeval structs. */
add #-8, r15
+ cfi_adjust_cfa_offset(8)
2:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -260,7 +499,10 @@ __lll_timedwait_tid:
bt 4f
mov r8, r4
- mov #FUTEX_WAIT, r5
+ /* XXX The kernel so far uses a global futex for the wakeup at
+ all times. */
+ mov #0, r5
+ extu.b r5, r5
mov r2, r6
mov r15, r7
mov #SYS_futex, r3
@@ -277,7 +519,7 @@ __lll_timedwait_tid:
add #8, r15
mov.l @r15+, r8
rts
- mov.l @r15+, r9
+ mov.l @r15+, r9
1:
/* Check whether the time expired. */
mov #-ETIMEDOUT, r1
@@ -285,7 +527,8 @@ __lll_timedwait_tid:
bf 2b
6:
bra 3b
- mov #ETIMEDOUT, r0
+ mov #ETIMEDOUT, r0
+ cfi_endproc
.L1k2:
.word 1000
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
index 45339f5e0..d7fada991 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,28 +20,107 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <syscall.h>
+#ifndef __ASSEMBLER__
#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
+#include <bits/kernel-features.h>
+#endif
+#define SYS_futex 240
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
+#ifndef __ASSEMBLER__
/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
-
-extern int __lll_mutex_lock_wait (int val, int *__futex) attribute_hidden;
-extern int __lll_mutex_timedlock_wait (int val, int *__futex,
- const struct timespec *abstime)
- attribute_hidden;
-extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
-
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
+
+extern int __lll_lock_wait_private (int val, int *__futex)
+ attribute_hidden;
+extern int __lll_lock_wait (int val, int *__futex, int private)
+ attribute_hidden;
+extern int __lll_timedlock_wait (int val, int *__futex,
+ const struct timespec *abstime, int private)
+ attribute_hidden;
+extern int __lll_robust_lock_wait (int val, int *__futex, int private)
+ attribute_hidden;
+extern int __lll_robust_timedlock_wait (int val, int *__futex,
+ const struct timespec *abstime,
+ int private)
+ attribute_hidden;
+extern int __lll_unlock_wake_private (int *__futex) attribute_hidden;
+extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
+
+#define lll_trylock(futex) \
+ ({ unsigned char __result; \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%1,r2\n\
+ cmp/eq r2,%3\n\
+ bf 1f\n\
+ mov.l %2,@%1\n\
+ 1: mov r1,r15\n\
+ mov #-1,%0\n\
+ negc %0,%0"\
+ : "=r" (__result) \
+ : "r" (&(futex)), \
+ "r" (LLL_LOCK_INITIALIZER_LOCKED), \
+ "r" (LLL_LOCK_INITIALIZER) \
+ : "r0", "r1", "r2", "t", "memory"); \
+ __result; })
-#define lll_mutex_trylock(futex) \
+#define lll_robust_trylock(futex, id) \
({ unsigned char __result; \
__asm __volatile ("\
.align 2\n\
@@ -57,12 +137,12 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
negc %0,%0"\
: "=r" (__result) \
: "r" (&(futex)), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "r" (id), \
+ "r" (LLL_LOCK_INITIALIZER) \
: "r0", "r1", "r2", "t", "memory"); \
__result; })
-#define lll_mutex_cond_trylock(futex) \
+#define lll_cond_trylock(futex) \
({ unsigned char __result; \
__asm __volatile ("\
.align 2\n\
@@ -79,12 +159,12 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
negc %0,%0"\
: "=r" (__result) \
: "r" (&(futex)), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "r" (LLL_MUTEX_LOCK_INITIALIZER) \
+ "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "r" (LLL_LOCK_INITIALIZER) \
: "r0", "r1", "r2", "t", "memory"); \
__result; })
-#define lll_mutex_lock(futex) \
+#define lll_lock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -100,11 +180,37 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
: "=&r" (__result) : "r" (1), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __lll_mutex_lock_wait (__result, __futex); })
+ { \
+ if (__builtin_constant_p (private) \
+ && (private) == LLL_PRIVATE) \
+ __lll_lock_wait_private (__result, __futex); \
+ else \
+ __lll_lock_wait (__result, __futex, (private)); \
+ } \
+ })
+
+#define lll_robust_lock(futex, id, private) \
+ ({ int __result, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_lock_wait (__result, __futex, private); \
+ __result; })
/* Special version of lll_mutex_lock which causes the unlock function to
always wake up waiters. */
-#define lll_mutex_cond_lock(futex) \
+#define lll_cond_lock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -120,9 +226,28 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
: "=&r" (__result) : "r" (2), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __lll_mutex_lock_wait (__result, __futex); })
+ __lll_lock_wait (__result, __futex, private); })
-#define lll_mutex_timedlock(futex, timeout) \
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int __result, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id | FUTEX_WAITERS), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_lock_wait (__result, __futex, private); \
+ __result; })
+
+#define lll_timedlock(futex, timeout, private) \
({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -138,10 +263,30 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
: "=&r" (__result) : "r" (1), "r" (__futex) \
: "r0", "r1", "t", "memory"); \
if (__result) \
- __result = __lll_mutex_timedlock_wait (__result, __futex, timeout); \
+ __result = __lll_timedlock_wait (__result, __futex, timeout, private); \
__result; })
-#define lll_mutex_unlock(futex) \
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int __result, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ nop\n\
+ mov r15,r1\n\
+ mov #-8,r15\n\
+ 0: mov.l @%2,%0\n\
+ tst %0,%0\n\
+ bf 1f\n\
+ mov.l %1,@%2\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (id), "r" (__futex) \
+ : "r0", "r1", "t", "memory"); \
+ if (__result) \
+ __result = __lll_robust_timedlock_wait (__result, __futex, \
+ timeout, private); \
+ __result; })
+
+#define lll_unlock(futex, private) \
(void) ({ int __result, *__futex = &(futex); \
__asm __volatile ("\
.align 2\n\
@@ -155,22 +300,45 @@ extern int __lll_mutex_unlock_wake (int *__futex) attribute_hidden;
: "=&r" (__result) : "r" (__futex) \
: "r0", "r1", "memory"); \
if (__result) \
- __lll_mutex_unlock_wake (__futex); })
-
-#define lll_mutex_islocked(futex) \
- (futex != 0)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
+ { \
+ if (__builtin_constant_p (private) \
+ && (private) == LLL_PRIVATE) \
+ __lll_unlock_wake_private (__futex); \
+ else \
+ __lll_unlock_wake (__futex, (private)); \
+ } \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ (void) ({ int __result, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ mov r15,r1\n\
+ mov #-6,r15\n\
+ 0: mov.l @%1,%0\n\
+ and %2,%0\n\
+ mov.l %0,@%1\n\
+ 1: mov r1,r15"\
+ : "=&r" (__result) : "r" (__futex), "r" (FUTEX_WAITERS) \
+ : "r0", "r1", "memory"); \
+ if (__result) \
+ __lll_unlock_wake (__futex, private); })
+#define lll_robust_dead(futex, private) \
+ (void) ({ int __ignore, *__futex = &(futex); \
+ __asm __volatile ("\
+ .align 2\n\
+ mova 1f,r0\n\
+ mov r15,r1\n\
+ mov #-6,r15\n\
+ 0: mov.l @%1,%0\n\
+ or %2,%0\n\
+ mov.l %0,@%1\n\
+ 1: mov r1,r15"\
+ : "=&r" (__ignore) : "r" (__futex), "r" (FUTEX_OWNER_DIED) \
+ : "r0", "r1", "memory"); \
+ lll_futex_wake (__futex, 1, private); })
# ifdef NEED_SYSCALL_INST_PAD
# define SYSCALL_WITH_INST_PAD "\
@@ -180,27 +348,17 @@ typedef int lll_lock_t;
trapa #0x14"
# endif
-#define lll_futex_wait(futex, val) \
- do { \
- int __ignore; \
- register unsigned long __r3 __asm ("r3") = SYS_futex; \
- register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
- register unsigned long __r5 __asm ("r5") = FUTEX_WAIT; \
- register unsigned long __r6 __asm ("r6") = (unsigned long) (val); \
- register unsigned long __r7 __asm ("r7") = 0; \
- __asm __volatile (SYSCALL_WITH_INST_PAD \
- : "=z" (__ignore) \
- : "r" (__r3), "r" (__r4), "r" (__r5), \
- "r" (__r6), "r" (__r7) \
- : "memory", "t"); \
- } while (0)
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait (futex, val, NULL, private)
+
-#define lll_futex_timed_wait(futex, val, timeout) \
+#define lll_futex_timed_wait(futex, val, timeout, private) \
({ \
int __status; \
register unsigned long __r3 __asm ("r3") = SYS_futex; \
register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
- register unsigned long __r5 __asm ("r5") = FUTEX_WAIT; \
+ register unsigned long __r5 __asm ("r5") \
+ = __lll_private_flag (FUTEX_WAIT, private); \
register unsigned long __r6 __asm ("r6") = (unsigned long) (val); \
register unsigned long __r7 __asm ("r7") = (timeout); \
__asm __volatile (SYSCALL_WITH_INST_PAD \
@@ -212,12 +370,13 @@ typedef int lll_lock_t;
})
-#define lll_futex_wake(futex, nr) \
+#define lll_futex_wake(futex, nr, private) \
do { \
int __ignore; \
register unsigned long __r3 __asm ("r3") = SYS_futex; \
register unsigned long __r4 __asm ("r4") = (unsigned long) (futex); \
- register unsigned long __r5 __asm ("r5") = FUTEX_WAKE; \
+ register unsigned long __r5 __asm ("r5") \
+ = __lll_private_flag (FUTEX_WAKE, private); \
register unsigned long __r6 __asm ("r6") = (unsigned long) (nr); \
register unsigned long __r7 __asm ("r7") = 0; \
__asm __volatile (SYSCALL_WITH_INST_PAD \
@@ -228,33 +387,19 @@ typedef int lll_lock_t;
} while (0)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
-
-#define lll_trylock(futex) lll_mutex_trylock (futex)
-#define lll_lock(futex) lll_mutex_lock (futex)
-#define lll_unlock(futex) lll_mutex_unlock (futex)
-
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
afterwards. */
-extern int __lll_wait_tid (int *tid) attribute_hidden;
#define lll_wait_tid(tid) \
do { \
__typeof (tid) *__tid = &(tid); \
while (*__tid != 0) \
- lll_futex_wait (__tid, *__tid); \
+ lll_futex_wait (__tid, *__tid, LLL_SHARED); \
} while (0)
extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
@@ -271,24 +416,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
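With __ASSUME_PRIVATE_FUTEX, __lll_private_flag reduces to OR-ing in FUTEX_PRIVATE_FLAG and XOR-ing with the caller's private argument, which is why LLL_PRIVATE/LLL_SHARED look inverted. The arithmetic in isolation:

#define FUTEX_PRIVATE_FLAG_SKETCH 128
#define LLL_PRIVATE_SKETCH 0
#define LLL_SHARED_SKETCH  FUTEX_PRIVATE_FLAG_SKETCH

static inline int
futex_op_with_private (int op, int private)
{
  /* private == LLL_PRIVATE keeps the flag set (private futex);
     private == LLL_SHARED cancels it out (shared futex).  */
  return (op | FUTEX_PRIVATE_FLAG_SKETCH) ^ private;
}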
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
new file mode 100644
index 000000000..dab1ae4ab
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevelrobustlock.S
@@ -0,0 +1,264 @@
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevellock.h>
+#include <lowlevelrobustlock.h>
+#include <bits/kernel-features.h>
+#include "lowlevel-atomic.h"
+
+ .text
+
+#define FUTEX_WAITERS 0x80000000
+#define FUTEX_OWNER_DIED 0x40000000
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ mov #(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), tmp; \
+ extu.b tmp, tmp; \
+ xor tmp, reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg
+# else
+# define LOAD_FUTEX_WAIT(reg,tmp,tmp2) \
+ stc gbr, tmp ; \
+ mov.w 99f, tmp2 ; \
+ add tmp2, tmp ; \
+ mov.l @tmp, tmp2 ; \
+ bra 98f ; \
+ mov #FUTEX_PRIVATE_FLAG, tmp ; \
+99: .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE ; \
+98: extu.b tmp, tmp ; \
+ xor tmp, reg ; \
+ and tmp2, reg ; \
+ mov #FUTEX_WAIT, tmp ; \
+ or tmp, reg
+# endif
+#endif
+
+ .globl __lll_robust_lock_wait
+ .type __lll_robust_lock_wait,@function
+ .hidden __lll_robust_lock_wait
+ .align 5
+ cfi_startproc
+__lll_robust_lock_wait:
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r5, r8
+ mov #0, r7 /* No timeout. */
+ mov r6, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
+
+4:
+ mov r4, r6
+ mov.l .L_FUTEX_WAITERS, r0
+ or r0, r6
+ shlr r0 /* r0 = FUTEX_OWNER_DIED */
+ tst r0, r4
+ bf/s 3f
+ cmp/eq r4, r6
+ bt 1f
+
+ CMPXCHG (r4, @r8, r6, r2)
+ bf 2f
+
+1:
+ mov r8, r4
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+
+ mov.l @r8, r2
+
+2:
+ tst r2, r2
+ bf/s 4b
+ mov r2, r4
+
+ stc gbr, r1
+ mov.w .Ltidoff, r2
+ add r2, r1
+ mov.l @r1, r6
+ mov #0, r3
+ CMPXCHG (r3, @r8, r6, r4)
+ bf 4b
+ mov #0, r4
+
+3:
+ mov.l @r15+, r8
+ ret
+ mov r4, r0
+ cfi_endproc
+ .align 2
+.L_FUTEX_WAITERS:
+ .long FUTEX_WAITERS
+.Ltidoff:
+ .word TID - TLS_PRE_TCB_SIZE
+ .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
+
+
+ .globl __lll_robust_timedlock_wait
+ .type __lll_robust_timedlock_wait,@function
+ .hidden __lll_robust_timedlock_wait
+ .align 5
+ cfi_startproc
+__lll_robust_timedlock_wait:
+ /* Check for a valid timeout value. */
+ mov.l @(4,r6), r1
+ mov.l .L1g, r0
+ cmp/hs r0, r1
+ bt 3f
+
+ mov.l r11, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r11, 0)
+ mov.l r10, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r10, 0)
+ mov.l r9, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r9, 0)
+ mov.l r8, @-r15
+ cfi_adjust_cfa_offset(4)
+ cfi_rel_offset (r8, 0)
+ mov r7, r11
+ mov r4, r10
+ mov r6, r9
+ mov r5, r8
+
+ /* Stack frame for the timespec and timeval structs. */
+ add #-8, r15
+ cfi_adjust_cfa_offset(8)
+
+1:
+ /* Get current time. */
+ mov r15, r4
+ mov #0, r5
+ mov #__NR_gettimeofday, r3
+ trapa #0x12
+ SYSCALL_INST_PAD
+
+ /* Compute relative timeout. */
+ mov.l @(4,r15), r0
+ mov.w .L1k, r1
+ dmulu.l r0, r1 /* Microseconds to nanoseconds. */
+ mov.l @r9, r2
+ mov.l @(4,r9), r3
+ mov.l @r15, r0
+ sts macl, r1
+ sub r0, r2
+ clrt
+ subc r1, r3
+ bf 4f
+ mov.l .L1g, r1
+ add r1, r3
+ add #-1, r2
+4:
+ cmp/pz r2
+ bf 8f /* Time is already up. */
+
+ mov.l r2, @r15 /* Store relative timeout. */
+ mov.l r3, @(4,r15)
+
+ mov r10, r6
+ mov.l .L_FUTEX_WAITERS2, r0
+ or r0, r6
+ shlr r0 /* r0 = FUTEX_OWNER_DIED */
+ tst r0, r4
+ bf/s 6f
+ cmp/eq r4, r6
+ bt 2f
+
+ CMPXCHG (r4, @r8, r6, r2)
+ bf/s 5f
+ mov #0, r5
+
+2:
+ mov r8, r4
+ mov r11, r5
+ LOAD_FUTEX_WAIT (r5, r0, r1)
+ mov r10, r6
+ mov r15, r7
+ mov #SYS_futex, r3
+ extu.b r3, r3
+ trapa #0x14
+ SYSCALL_INST_PAD
+ mov r0, r5
+
+ mov.l @r8, r2
+
+5:
+ tst r2, r2
+ bf/s 7f
+ mov r2, r10
+
+ stc gbr, r1
+ mov.w .Ltidoff2, r2
+ add r2, r1
+ mov.l @r1, r4
+ mov #0, r3
+ CMPXCHG (r3, @r8, r4, r10)
+ bf 7f
+ mov #0, r0
+
+6:
+ add #8, r15
+ mov.l @r15+, r8
+ mov.l @r15+, r9
+ mov.l @r15+, r10
+ rts
+ mov.l @r15+, r11
+
+7:
+ /* Check whether the time expired. */
+ mov #-ETIMEDOUT, r1
+ cmp/eq r5, r1
+ bf 1b
+
+8:
+ bra 6b
+ mov #ETIMEDOUT, r0
+3:
+ rts
+ mov #EINVAL, r0
+ cfi_endproc
+ .align 2
+.L_FUTEX_WAITERS2:
+ .long FUTEX_WAITERS
+.L1g:
+ .long 1000000000
+.Ltidoff2:
+ .word TID - TLS_PRE_TCB_SIZE
+.L1k:
+ .word 1000
+ .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h
index eb83653d3..acf1a617e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/not-cancel.h
@@ -1,82 +1 @@
-/* Uncancelable versions of cancelable interfaces. Linux/NPTL version.
- Copyright (C) 2003 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
-
-#include <sysdep.h>
-
-#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
-extern int __open_nocancel (const char *, int, ...) attribute_hidden;
-extern int __close_nocancel (int) attribute_hidden;
-extern int __read_nocancel (int, void *, size_t) attribute_hidden;
-extern int __write_nocancel (int, const void *, size_t) attribute_hidden;
-extern pid_t __waitpid_nocancel (pid_t, int *, int) attribute_hidden;
-
-libc_hidden_proto(__open_nocancel)
-libc_hidden_proto(__close_nocancel)
-libc_hidden_proto(__read_nocancel)
-libc_hidden_proto(__write_nocancel)
-libc_hidden_proto(__waitpid_nocancel)
-
-#else
-#define __open_nocancel(name, ...) __open (name, __VA_ARGS__)
-#define __close_nocancel(fd) __close (fd)
-#define __read_nocancel(fd, buf, len) __read (fd, buf, len)
-#define __write_nocancel(fd, buf, len) __write (fd, buf, len)
-#define __waitpid_nocancel(pid, stat_loc, options) \
- __waitpid (pid, stat_loc, options)
-#endif
-
-/* Uncancelable open. */
-#define open_not_cancel(name, flags, mode) \
- __open_nocancel (name, flags, mode)
-#define open_not_cancel_2(name, flags) \
- __open_nocancel (name, flags)
-
-/* Uncancelable close. */
-#define close_not_cancel(fd) \
- __close_nocancel (fd)
-#define close_not_cancel_no_status(fd) \
- (void) ({ INTERNAL_SYSCALL_DECL (err); \
- INTERNAL_SYSCALL (close, err, 1, (fd)); })
-
-/* Uncancelable read. */
-#define read_not_cancel(fd, buf, n) \
- __read_nocancel (fd, buf, n)
-
-/* Uncancelable write. */
-#define write_not_cancel(fd, buf, n) \
- __write_nocancel (fd, buf, n)
-
-/* Uncancelable writev. */
-#define writev_not_cancel_no_status(fd, iov, n) \
- (void) ({ INTERNAL_SYSCALL_DECL (err); \
- INTERNAL_SYSCALL (writev, err, 3, (fd), (iov), (n)); })
-
-/* Uncancelable fcntl. */
-#define fcntl_not_cancel(fd, cmd, val) \
- __fcntl_nocancel (fd, cmd, val)
-
-/* Uncancelable waitpid. */
-#ifdef __NR_waitpid
-# define waitpid_not_cancel(pid, stat_loc, options) \
- __waitpid_nocancel (pid, stat_loc, options)
-#else
-# define waitpid_not_cancel(pid, stat_loc, options) \
- INLINE_SYSCALL (wait4, 4, pid, stat_loc, options, NULL)
-#endif
+#include "../i386/not-cancel.h"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c
index 5391d5cc8..82c97c352 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pt-initfini.c
@@ -1,5 +1,5 @@
-/* Special .init and .fini section support for SH. NPTL version.
- Copyright (C) 2003 Free Software Foundation, Inc.
+/* Special .init and .fini section support for SH. NPTL version.
+ Copyright (C) 2003, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it
@@ -62,19 +62,12 @@ _init:\n\
mov.l .L24,r1\n\
add r0,r1\n\
jsr @r1\n\
- nop\n\
- mova .L23,r0\n\
- mov.l .L23,r1\n\
- add r0,r1\n\
- jsr @r1\n\
mov r15,r14\n\
bra 1f\n\
nop\n\
.align 2\n\
.L22:\n\
.long _GLOBAL_OFFSET_TABLE_\n\
-.L23:\n\
- .long __gmon_start__@PLT\n\
.L24:\n\
.long __pthread_initialize_minimal_internal@PLT\n\
1:\n\
@@ -91,16 +84,6 @@ _init:\n\
rts \n\
mov.l @r15+,r12\n\
END_INIT\n\
- .section .text\n\
- .align 5\n\
- .weak __gmon_start__\n\
- .type __gmon_start__,@function\n\
-__gmon_start__:\n\
- mov.l r14,@-r15\n\
- mov r15,r14\n\
- mov r14,r15\n\
- rts \n\
- mov.l @r15+,r14\n\
\n\
/*@_init_EPILOG_ENDS*/\n\
\n\
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
index 608c7364c..4a6059aef 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_barrier_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,13 +17,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
.globl pthread_barrier_wait
@@ -64,7 +61,13 @@ pthread_barrier_wait:
#if CURR_EVENT != 0
add #CURR_EVENT, r4
#endif
+#if FUTEX_WAIT == 0
+ mov.l @(PRIVATE,r8), r5
+#else
mov #FUTEX_WAIT, r5
+ mov.l @(PRIVATE,r8), r0
+ or r0, r5
+#endif
mov #0, r7
8:
mov #SYS_futex, r3
@@ -81,8 +84,10 @@ pthread_barrier_wait:
/* Increment LEFT. If this brings the count back to the
initial count unlock the object. */
- INC (@(LEFT,r8), r2)
+ mov #1, r3
mov.l @(INIT_COUNT,r8), r4
+ XADD (r3, @(LEFT,r8), r2, r5)
+ add #-1, r4
cmp/eq r2, r4
bf 10f
@@ -115,6 +120,8 @@ pthread_barrier_wait:
#endif
mov #0, r7
mov #FUTEX_WAKE, r5
+ mov.l @(PRIVATE,r8), r0
+ or r0, r5
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
@@ -122,8 +129,10 @@ pthread_barrier_wait:
/* Increment LEFT. If this brings the count back to the
initial count unlock the object. */
- INC (@(LEFT,r8), r2)
+ mov #1, r3
mov.l @(INIT_COUNT,r8), r4
+ XADD (r3, @(LEFT,r8), r2, r5)
+ add #-1, r4
cmp/eq r2, r4
bf 5f
@@ -139,6 +148,10 @@ pthread_barrier_wait:
mov.l @r15+, r9
1:
+ mov.l @(PRIVATE,r8), r6
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r6
mov r2, r4
mov r8, r5
mov.l .Lwait0, r1
@@ -149,6 +162,10 @@ pthread_barrier_wait:
nop
4:
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake0, r1
bsrf r1
@@ -159,6 +176,10 @@ pthread_barrier_wait:
6:
mov r6, r9
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake1, r1
bsrf r1
@@ -167,8 +188,12 @@ pthread_barrier_wait:
bra 7b
mov r9, r6
-9:
+9:
mov r6, r9
+ mov.l @(PRIVATE,r8), r5
+ mov #LLL_SHARED, r0
+ extu.b r0, r0
+ xor r0, r5
mov r8, r4
mov.l .Lwake2, r1
bsrf r1
@@ -181,11 +206,11 @@ pthread_barrier_wait:
.Lall:
.long 0x7fffffff
.Lwait0:
- .long __lll_mutex_lock_wait-.Lwait0b
+ .long __lll_lock_wait-.Lwait0b
.Lwake0:
- .long __lll_mutex_unlock_wake-.Lwake0b
+ .long __lll_unlock_wake-.Lwake0b
.Lwake1:
- .long __lll_mutex_unlock_wake-.Lwake1b
+ .long __lll_unlock_wake-.Lwake1b
.Lwake2:
- .long __lll_mutex_unlock_wake-.Lwake2b
+ .long __lll_unlock_wake-.Lwake2b
.size pthread_barrier_wait,.-pthread_barrier_wait
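With XADD now returning the old value of LEFT, "back to the initial count" means the previous value was init_count - 1; the last thread to leave unlocks the barrier. The bookkeeping in C:

#include <stdatomic.h>
#include <stdbool.h>

static inline bool
barrier_last_to_leave (atomic_uint *left, unsigned int init_count)
{
  unsigned int old = atomic_fetch_add (left, 1);
  return old == init_count - 1;   /* this increment restored the full count */
}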
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
index 36eccf1e6..382512490 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,17 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
-
.text
/* int pthread_cond_broadcast (pthread_cond_t *cond) */
@@ -96,8 +92,24 @@ __pthread_cond_broadcast:
bt/s 9f
add #cond_futex, r4
+ /* XXX: The kernel only supports FUTEX_CMP_REQUEUE to the same
+ type of futex (private resp. shared). */
+ mov.l @(MUTEX_KIND,r9), r0
+ tst #(PI_BIT|PS_BIT), r0
+ bf 9f
+
/* Wake up all threads. */
- mov #FUTEX_CMP_REQUEUE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_CMP_REQUEUE, r0
+ or r0, r5
+#endif
mov #1, r6
mov #-1, r7
shlr r7 /* r7 = 0x7fffffff */
@@ -154,10 +166,17 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait5, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait5, r1
bsrf r1
mov r2, r4
-.Lmwait5b:
+.Lwait5b:
bra 2b
nop
@@ -167,10 +186,16 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake5, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake5, r1
bsrf r1
- nop
-.Lmwake5b:
+ extu.b r5, r5
+.Lwake5b:
bra 6b
nop
@@ -180,15 +205,36 @@ __pthread_cond_broadcast:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake6, r1
+ mov #-1, r0
+ cmp/eq r0, r9
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake6, r1
bsrf r1
- nop
-.Lmwake6b:
+ extu.b r5, r5
+.Lwake6b:
bra 8b
nop
9:
- mov #FUTEX_WAKE, r5
+ mov #-1, r0
+ cmp/eq r0, r9
+ bt/s 99f
+ mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+99:
mov #-1, r6
shlr r6 /* r6 = 0x7fffffff */
mov #0, r7
@@ -199,12 +245,17 @@ __pthread_cond_broadcast:
bra 10b
nop
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
.align 2
-.Lmwait5:
- .long __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
- .long __lll_mutex_unlock_wake-.Lmwake5b
-.Lmwake6:
- .long __lll_mutex_unlock_wake-.Lmwake6b
+.Lwait5:
+ .long __lll_lock_wait-.Lwait5b
+.Lwake5:
+ .long __lll_unlock_wake-.Lwake5b
+.Lwake6:
+ .long __lll_unlock_wake-.Lwake6b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
weak_alias (__pthread_cond_broadcast, pthread_cond_broadcast)
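
A pattern introduced here and repeated through the rest of the series: dep_mutex equal to -1 marks a process-shared condvar, any other value means only private mutexes have been used with it, so both the futex operation and the LLL_PRIVATE/LLL_SHARED argument for the __lll_* helpers can be chosen from that one word. The XXX comment also explains the extra MUTEX_KIND test: FUTEX_CMP_REQUEUE can only requeue between futexes of the same type, so PI or process-shared mutexes fall back to the plain FUTEX_WAKE path. A small C sketch of the selection (constants as in the futex ABI, field names assumed):

    #define FUTEX_PRIVATE_FLAG 128
    #define LLL_PRIVATE 0
    #define LLL_SHARED  128

    static int cond_futex_op(const void *dep_mutex, int base_op)
    {
        /* dep_mutex == (void *) -1  =>  process-shared, no private flag.  */
        return dep_mutex == (const void *) -1 ? base_op
                                              : base_op | FUTEX_PRIVATE_FLAG;
    }

    static int cond_lll_private(const void *dep_mutex)
    {
        return dep_mutex == (const void *) -1 ? LLL_SHARED : LLL_PRIVATE;
    }
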
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
index d92f11cbb..914a1bad0 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,16 +17,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
+#include <pthread-errnos.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-
-#define EINVAL 22
-
.text
/* int pthread_cond_signal (pthread_cond_t *cond) */
@@ -77,14 +73,63 @@ __pthread_cond_signal:
/* Wake up one thread. */
mov r8, r4
add #cond_futex, r4
- mov #FUTEX_WAKE, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAKE_OP, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE_OP, r0
+ or r0, r5
+#endif
+99:
mov #1, r6
mov #0, r7
+ mov r8, r0
+ add #cond_lock, r0
+ mov.l .Lfutexop, r1
mov #SYS_futex, r3
extu.b r3, r3
trapa #0x14
SYSCALL_INST_PAD
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ mov r0, r1
+ mov #-12, r2
+ shad r2, r1
+ not r1, r1
+ tst r1, r1
+ bt 7f
+
+6:
+ mov #0, r0
+ lds.l @r15+, pr
+ rts
+ mov.l @r15+, r8
+
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+ .align 2
+.Lfutexop:
+ .long FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
+
+7:
+ /* r5 should be either FUTEX_WAKE_OP or
+ FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG from the previous syscall. */
+ mov #(FUTEX_WAKE ^ FUTEX_WAKE_OP), r0
+ xor r0, r5
+ trapa #0x14
+ SYSCALL_INST_PAD
+
4:
/* Unlock. */
#if cond_lock != 0
@@ -93,12 +138,26 @@ __pthread_cond_signal:
DEC (@r8, r2)
#endif
tst r2, r2
- bf 5f
-6:
- mov #0, r0
- lds.l @r15+, pr
- rts
- mov.l @r15+, r8
+ bt 6b
+
+5:
+ /* Unlock in loop requires wakeup. */
+ mov r8, r4
+#if cond_lock != 0
+ add #cond_lock, r4
+#endif
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake4, r1
+ bsrf r1
+ extu.b r5, r5
+.Lwake4b:
+ bra 6b
+ nop
1:
/* Initial locking failed. */
@@ -106,30 +165,24 @@ __pthread_cond_signal:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait4, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait4, r1
bsrf r1
mov r2, r4
-.Lmwait4b:
+.Lwait4b:
bra 2b
nop
-5:
- /* Unlock in loop requires wakeup. */
- mov r8, r4
-#if cond_lock != 0
- add #cond_lock, r4
-#endif
- mov.l .Lmwake4, r1
- bsrf r1
- nop
-.Lmwake4b:
- bra 6b
- nop
-
.align 2
-.Lmwait4:
- .long __lll_mutex_lock_wait-.Lmwait4b
-.Lmwake4:
- .long __lll_mutex_unlock_wake-.Lmwake4b
+.Lwait4:
+ .long __lll_lock_wait-.Lwait4b
+.Lwake4:
+ .long __lll_unlock_wake-.Lwake4b
.size __pthread_cond_signal, .-__pthread_cond_signal
weak_alias (__pthread_cond_signal, pthread_cond_signal)
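
pthread_cond_signal now tries FUTEX_WAKE_OP, which wakes one waiter on the condvar futex and clears the internal lock word in the same syscall, and falls back to a plain FUTEX_WAKE whenever the kernel returns an error (which also covers old kernels without WAKE_OP). A hedged C sketch of that control flow; the futex() wrapper is an assumption, the constants match the futex ABI and NPTL's lowlevellock.h:

    #include <stddef.h>

    #define FUTEX_WAKE     1
    #define FUTEX_WAKE_OP  5
    /* "clear the word and wake one waiter if the old value was greater than
       one", the composite op NPTL names FUTEX_OP_CLEAR_WAKE_IF_GT_ONE.  */
    #define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)

    /* Assumed thin wrapper around the raw futex syscall, not a libc symbol.  */
    extern int futex(int *uaddr, int op, int val, int val2, int *uaddr2, int val3);

    static void cond_signal_wake(int *cond_futex, int *cond_lock, int priv)
    {
        if (futex(cond_futex, FUTEX_WAKE_OP | priv, 1, 0, cond_lock,
                  FUTEX_OP_CLEAR_WAKE_IF_GT_ONE) < 0)
            /* Any error: retry with a plain wake, which is what the
               "xor (FUTEX_WAKE ^ FUTEX_WAKE_OP)" at label 7 above does.  */
            futex(cond_futex, FUTEX_WAKE | priv, 1, 0, NULL, 0);
    }
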
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
index 5812488b2..3e117564f 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,16 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
@@ -119,7 +116,7 @@ __pthread_cond_timedwait:
mov.l @(cond_futex,r8), r0
add r2, r0
mov.l r0, @(cond_futex,r8)
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8), r0
add r2, r0
mov.l r0, @(cond_nwaiters,r8)
@@ -135,7 +132,7 @@ __pthread_cond_timedwait:
#ifdef __NR_clock_gettime
/* Get the clock number. */
mov.l @(cond_nwaiters,r8), r4
- mov #((1 << clock_bits) - 1), r0
+ mov #((1 << nwaiters_shift) - 1), r0
and r0, r4
/* Only clocks 0 and 1 are allowed. Both are handled in the
kernel. */
@@ -163,7 +160,7 @@ __pthread_cond_timedwait:
mov r15, r4
add #16, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -181,7 +178,7 @@ __pthread_cond_timedwait:
mov r15, r4
add #16, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -233,7 +230,22 @@ __pthread_cond_timedwait:
mov r15, r7
add #16, r7
- mov #FUTEX_WAIT, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+#endif
+99:
mov.l @(8,r15), r6
mov r8, r4
add #cond_futex, r4
@@ -322,7 +334,7 @@ __pthread_cond_timedwait:
mov.l r1,@(woken_seq+4,r8)
24:
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8),r0
sub r2, r0
mov.l r0,@(cond_nwaiters,r8)
@@ -334,7 +346,7 @@ __pthread_cond_timedwait:
not r0, r0
cmp/eq #0, r0
bf/s 25f
- mov #((1 << clock_bits) - 1), r1
+ mov #((1 << nwaiters_shift) - 1), r1
not r1, r1
mov.l @(cond_nwaiters,r8),r0
tst r1, r0
@@ -342,7 +354,22 @@ __pthread_cond_timedwait:
mov r8, r4
add #cond_nwaiters, r4
- mov #FUTEX_WAKE, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+99:
mov #1, r6
mov #0, r7
mov #SYS_futex, r3
@@ -382,6 +409,10 @@ __pthread_cond_timedwait:
rts
mov.l @r15+, r8
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.L1k:
.word 1000
.align 2
@@ -402,10 +433,17 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait2, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait2, r1
bsrf r1
mov r2, r4
-.Lmwait2b:
+.Lwait2b:
bra 2b
nop
@@ -415,10 +453,16 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake2, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lmwait2, r1
bsrf r1
- nop
-.Lmwake2b:
+ extu.b r5, r5
+.Lmwait2b:
bra 4b
nop
@@ -428,10 +472,17 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait3, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait3, r1
bsrf r1
mov r2, r4
-.Lmwait3b:
+.Lwait3b:
bra 6b
nop
@@ -441,10 +492,16 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake3, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lmwait3, r1
bsrf r1
- nop
-.Lmwake3b:
+ extu.b r5, r5
+.Lmwait3b:
bra 11b
nop
@@ -463,25 +520,31 @@ __pthread_cond_timedwait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake4, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lmwait4, r1
bsrf r1
- nop
-.Lmwake4b:
+ extu.b r5, r5
+.Lmwait4b:
17:
bra 18b
mov.l @(24,r15), r0
.align 2
+.Lwait2:
+ .long __lll_lock_wait-.Lwait2b
.Lmwait2:
- .long __lll_mutex_lock_wait-.Lmwait2b
-.Lmwake2:
- .long __lll_mutex_unlock_wake-.Lmwake2b
+ .long __lll_unlock_wake-.Lmwait2b
+.Lwait3:
+ .long __lll_lock_wait-.Lwait3b
.Lmwait3:
- .long __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
- .long __lll_mutex_unlock_wake-.Lmwake3b
-.Lmwake4:
- .long __lll_mutex_unlock_wake-.Lmwake4b
+ .long __lll_unlock_wake-.Lmwait3b
+.Lmwait4:
+ .long __lll_unlock_wake-.Lmwait4b
.size __pthread_cond_timedwait, .-__pthread_cond_timedwait
weak_alias (__pthread_cond_timedwait, pthread_cond_timedwait)
@@ -505,10 +568,17 @@ __condvar_tw_cleanup:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait5, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait5, r1
bsrf r1
mov r2, r4
-.Lmwait5b:
+.Lwait5b:
1:
mov.l @(broadcast_seq,r8), r0
@@ -519,6 +589,21 @@ __condvar_tw_cleanup:
mov #1, r2
mov #0, r3
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ mov.l @(total_seq+4,r8), r0
+ mov.l @(wakeup_seq+4,r8), r1
+ cmp/hi r1, r0
+ bt/s 6f
+ cmp/hi r0, r1
+ bt 7f
+ mov.l @(total_seq,r8), r0
+ mov.l @(wakeup_seq,r8), r1
+ cmp/hs r0, r1
+ bt 7f
+
+6:
clrt
mov.l @(wakeup_seq,r8),r0
mov.l @(wakeup_seq+4,r8),r1
@@ -530,6 +615,7 @@ __condvar_tw_cleanup:
add r2, r0
mov.l r0,@(cond_futex,r8)
+7:
clrt
mov.l @(woken_seq,r8),r0
mov.l @(woken_seq+4,r8),r1
@@ -539,7 +625,7 @@ __condvar_tw_cleanup:
mov.l r1,@(woken_seq+4,r8)
3:
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8),r0
sub r2, r0
mov.l r0,@(cond_nwaiters,r8)
@@ -552,7 +638,7 @@ __condvar_tw_cleanup:
not r0, r0
cmp/eq #0, r0
bf/s 4f
- mov #((1 << clock_bits) - 1), r1
+ mov #((1 << nwaiters_shift) - 1), r1
not r1, r1
mov.l @(cond_nwaiters,r8),r0
tst r1, r0
@@ -582,10 +668,16 @@ __condvar_tw_cleanup:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake5, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lmwait5, r1
bsrf r1
- nop
-.Lmwake5b:
+ extu.b r5, r5
+.Lmwait5b:
2:
/* Wake up all waiters to make sure no signal gets lost. */
@@ -618,10 +710,10 @@ __condvar_tw_cleanup:
sleep
.align 2
+.Lwait5:
+ .long __lll_lock_wait-.Lwait5b
.Lmwait5:
- .long __lll_mutex_lock_wait-.Lmwait5b
-.Lmwake5:
- .long __lll_mutex_unlock_wake-.Lmwake5b
+ .long __lll_unlock_wake-.Lmwait5b
.Lmlocki5:
.long __pthread_mutex_cond_lock-.Lmlocki5b
.Lresume:
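
The new block in __condvar_tw_cleanup (and the identical one added to __condvar_w_cleanup further down) does what its comment says: a cancelled waiter consumes a wakeup only if one is still outstanding. With the condvar's 64-bit sequence counters, the C shape is roughly this, with field names assumed to match the offsets used above:

    #include <stdint.h>

    struct condvar_sketch {
        uint64_t total_seq;      /* waiters that ever started waiting */
        uint64_t wakeup_seq;     /* wakeups handed out so far */
        uint64_t woken_seq;      /* waiters that actually woke up */
        unsigned int futex;
    };

    static void cancelled_waiter_accounting(struct condvar_sketch *c)
    {
        if (c->wakeup_seq < c->total_seq) {
            /* A wakeup is still pending, so take it as a normal return would. */
            ++c->wakeup_seq;
            ++c->futex;
        }
        /* Otherwise the thread was woken and then cancelled; the signal is left
           alone (label 7 above) so it can still wake another waiter.  */
        ++c->woken_seq;
    }
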
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
index c7df9bf86..5a897f6fe 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,13 +17,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
+#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-
.text
/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
@@ -105,7 +104,7 @@ __pthread_cond_wait:
mov.l @(cond_futex,r8),r0
add r2, r0
mov.l r0,@(cond_futex,r8)
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8), r0
add r2, r0
mov.l r0, @(cond_nwaiters,r8)
@@ -137,7 +136,22 @@ __pthread_cond_wait:
mov.l r0, @r15
mov #0, r7
- mov #FUTEX_WAIT, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff0, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+#endif
+99:
mov.l @(8,r15), r6
mov r8, r4
add #cond_futex, r4
@@ -195,7 +209,7 @@ __pthread_cond_wait:
mov.l r1,@(woken_seq+4,r8)
16:
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8),r0
sub r2, r0
mov.l r0,@(cond_nwaiters,r8)
@@ -207,7 +221,7 @@ __pthread_cond_wait:
not r0, r0
cmp/eq #0, r0
bf/s 17f
- mov #((1 << clock_bits) - 1), r1
+ mov #((1 << nwaiters_shift) - 1), r1
not r1, r1
mov.l @(cond_nwaiters,r8),r0
tst r1, r0
@@ -215,7 +229,22 @@ __pthread_cond_wait:
mov r8, r4
add #cond_nwaiters, r4
- mov #FUTEX_WAKE, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff0, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+99:
mov #1, r6
mov #0, r7
mov #SYS_futex, r3
@@ -249,6 +278,10 @@ __pthread_cond_wait:
rts
mov.l @r15+, r8
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff0:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.align 2
.Lmunlock0:
.long __pthread_mutex_unlock_usercnt-.Lmunlock0b
@@ -265,10 +298,17 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait0, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait0, r1
bsrf r1
mov r2, r4
-.Lmwait0b:
+.Lwait0b:
bra 2b
nop
3:
@@ -277,10 +317,16 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake0, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake0, r1
bsrf r1
- nop
-.Lmwake0b:
+ extu.b r5, r5
+.Lwake0b:
bra 4b
nop
@@ -290,10 +336,17 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait1, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait1, r1
bsrf r1
mov r2, r4
-.Lmwait1b:
+.Lwait1b:
bra 6b
nop
@@ -303,10 +356,16 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake1, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake1, r1
bsrf r1
- nop
-.Lmwake1b:
+ extu.b r5, r5
+.Lwake1b:
bra 11b
nop
@@ -325,26 +384,32 @@ __pthread_cond_wait:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake2, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake2, r1
bsrf r1
- nop
-.Lmwake2b:
+ extu.b r5, r5
+.Lwake2b:
13:
bra 14b
mov.l @(12,r15), r0
.align 2
-.Lmwait0:
- .long __lll_mutex_lock_wait-.Lmwait0b
-.Lmwake0:
- .long __lll_mutex_unlock_wake-.Lmwake0b
-.Lmwait1:
- .long __lll_mutex_lock_wait-.Lmwait1b
-.Lmwake1:
- .long __lll_mutex_unlock_wake-.Lmwake1b
-.Lmwake2:
- .long __lll_mutex_unlock_wake-.Lmwake2b
+.Lwait0:
+ .long __lll_lock_wait-.Lwait0b
+.Lwake0:
+ .long __lll_unlock_wake-.Lwake0b
+.Lwait1:
+ .long __lll_lock_wait-.Lwait1b
+.Lwake1:
+ .long __lll_unlock_wake-.Lwake1b
+.Lwake2:
+ .long __lll_unlock_wake-.Lwake2b
.size __pthread_cond_wait, .-__pthread_cond_wait
weak_alias (__pthread_cond_wait, pthread_cond_wait)
@@ -368,10 +433,17 @@ __condvar_w_cleanup:
#if cond_lock != 0
add #cond_lock, r5
#endif
- mov.l .Lmwait3, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r6
+ mov #LLL_SHARED, r6
+99:
+ extu.b r6, r6
+ mov.l .Lwait3, r1
bsrf r1
mov r2, r4
-.Lmwait3b:
+.Lwait3b:
1:
mov.l @(broadcast_seq,r8), r0
@@ -382,6 +454,21 @@ __condvar_w_cleanup:
mov #1, r2
mov #0, r3
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ mov.l @(total_seq+4,r8), r0
+ mov.l @(wakeup_seq+4,r8), r1
+ cmp/hi r1, r0
+ bt/s 6f
+ cmp/hi r0, r1
+ bt 7f
+ mov.l @(total_seq,r8), r0
+ mov.l @(wakeup_seq,r8), r1
+ cmp/hs r0, r1
+ bt 7f
+
+6:
clrt
mov.l @(wakeup_seq,r8),r0
mov.l @(wakeup_seq+4,r8),r1
@@ -393,6 +480,7 @@ __condvar_w_cleanup:
add r2, r0
mov.l r0,@(cond_futex,r8)
+7:
clrt
mov.l @(woken_seq,r8),r0
mov.l @(woken_seq+4,r8),r1
@@ -402,7 +490,7 @@ __condvar_w_cleanup:
mov.l r1,@(woken_seq+4,r8)
3:
- mov #(1 << clock_bits), r2
+ mov #(1 << nwaiters_shift), r2
mov.l @(cond_nwaiters,r8),r0
sub r2, r0
mov.l r0,@(cond_nwaiters,r8)
@@ -415,7 +503,7 @@ __condvar_w_cleanup:
not r0, r0
cmp/eq #0, r0
bf/s 4f
- mov #((1 << clock_bits) - 1), r1
+ mov #((1 << nwaiters_shift) - 1), r1
not r1, r1
mov.l @(cond_nwaiters,r8),r0
tst r1, r0
@@ -423,7 +511,22 @@ __condvar_w_cleanup:
mov r8, r4
add #cond_nwaiters, r4
- mov #FUTEX_WAKE, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff1, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+99:
mov #1, r6
mov #0, r7
mov #SYS_futex, r3
@@ -445,10 +548,16 @@ __condvar_w_cleanup:
#if cond_lock != 0
add #cond_lock, r4
#endif
- mov.l .Lmwake3, r1
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bf/s 99f
+ mov #LLL_PRIVATE, r5
+ mov #LLL_SHARED, r5
+99:
+ mov.l .Lwake3, r1
bsrf r1
- nop
-.Lmwake3b:
+ extu.b r5, r5
+.Lwake3b:
2:
/* Wake up all waiters to make sure no signal gets lost. */
@@ -456,7 +565,22 @@ __condvar_w_cleanup:
bf/s 5f
mov r8, r4
add #cond_futex, r4
- mov #FUTEX_WAKE, r5
+ mov.l @(dep_mutex,r8), r0
+ cmp/eq #-1, r0
+ bt/s 99f
+ mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff1, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+99:
mov #-1, r6
shlr r6 /* r6 = 0x7fffffff */
mov #0, r7
@@ -480,11 +604,15 @@ __condvar_w_cleanup:
mov r11, r4
sleep
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff1:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.align 2
-.Lmwait3:
- .long __lll_mutex_lock_wait-.Lmwait3b
-.Lmwake3:
- .long __lll_mutex_unlock_wake-.Lmwake3b
+.Lwait3:
+ .long __lll_lock_wait-.Lwait3b
+.Lwake3:
+ .long __lll_unlock_wake-.Lwake3b
.Lmlocki3:
.long __pthread_mutex_cond_lock-.Lmlocki3b
.Lresume:
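
The clock_bits to nwaiters_shift rename reflects how cond_nwaiters is packed: the clock id lives in the low bit(s) and the waiter count in the rest, so waiters are counted in steps of 1 << nwaiters_shift; the "only clocks 0 and 1 are allowed" comment implies a shift of 1 here. A sketch of the bookkeeping, with names assumed and the destruction handling simplified:

    #define NWAITERS_SHIFT 1    /* low bit of cond_nwaiters holds the clock id */

    static void waiters_enter(unsigned int *cond_nwaiters)
    {
        *cond_nwaiters += 1u << NWAITERS_SHIFT;   /* clock bits stay untouched */
    }

    /* Returns nonzero when this was the last waiter; that is the case in which
       the assembly wakes a pthread_cond_destroy blocked on cond_nwaiters.  */
    static int waiters_leave(unsigned int *cond_nwaiters)
    {
        *cond_nwaiters -= 1u << NWAITERS_SHIFT;
        return (*cond_nwaiters >> NWAITERS_SHIFT) == 0;
    }
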
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S
index de83dec1b..caebb935a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_once.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,10 +18,10 @@
#include <unwindbuf.h>
#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
.comm __fork_generation, 4, 4
@@ -94,7 +94,19 @@ __pthread_once:
bf 3f /* Different for generation -> run initializer. */
/* Somebody else got here first. Wait. */
- mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+# if FUTEX_WAIT != 0
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+# endif
+#endif
mov r3, r6
mov #0, r7
mov #SYS_futex, r3
@@ -156,7 +168,17 @@ __pthread_once:
INC (@r9, r2)
/* Wake up all other threads. */
mov r9, r4
- mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+ extu.b r5, r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
mov #-1, r6
shlr r6 /* r6 = 0x7fffffff */
mov #0, r7
@@ -191,7 +213,17 @@ __pthread_once:
mov #0, r7
mov.l r7, @r9
mov r9, r4
- mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r5
+#else
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+#endif
+ extu.b r5, r5
mov #-1, r6
shlr r6 /* r6 = 0x7fffffff */
mov #SYS_futex, r3
@@ -207,6 +239,10 @@ __pthread_once:
sleep
cfi_endproc
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.align 2
.Lsigsetjmp:
.long __sigsetjmp@PLT-(.Lsigsetjmp0-.)
@@ -223,23 +259,3 @@ __pthread_once_internal = __pthread_once
.globl pthread_once
pthread_once = __pthread_once
-
-
- .type clear_once_control,@function
- .align 5
-clear_once_control:
- mov #0, r0
- mov.l r0, @r4
-
- mov #FUTEX_WAKE, r5
- mov #-1, r6
- shlr r6 /* r6 = 0x7fffffff */
- mov #0, r7
- mov #SYS_futex, r3
- extu.b r3, r3
- trapa #0x14
- SYSCALL_INST_PAD
-
- rts
- nop
- .size clear_once_control,.-clear_once_control
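
pthread_once shows the two ways every file in this series obtains the private-futex flag: with __ASSUME_PRIVATE_FUTEX it is a compile-time constant, otherwise the word cached in the TCB at thread setup (read here through gbr at PRIVATE_FUTEX - TLS_PRE_TCB_SIZE) is ORed in at run time, so one binary works on kernels with and without private futex support. Roughly, in C, with an extern standing in for that cached word:

    #define FUTEX_WAIT          0
    #define FUTEX_PRIVATE_FLAG  128

    /* Stand-in for the per-thread word the assembly reads via gbr: it holds
       FUTEX_PRIVATE_FLAG on kernels that support private futexes, else 0.  */
    extern int tcb_private_futex;

    static int once_wait_op(void)
    {
    #ifdef __ASSUME_PRIVATE_FUTEX
        return FUTEX_WAIT | FUTEX_PRIVATE_FLAG;   /* constant, folded at build time */
    #else
        return FUTEX_WAIT | tcb_private_futex;    /* runtime OR, as in the #else path */
    #endif
    }
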
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
index ce1ab37c8..52fe5de10 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_rdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -54,7 +53,8 @@ __pthread_rwlock_rdlock:
mov.l @(WRITERS_QUEUED,r8), r0
tst r0, r0
bt 5f
- mov.l @(FLAGS,r8), r0
+ mov #FLAGS, r0
+ mov.b @(r0,r8), r0
tst r0, r0
bt 5f
3:
@@ -74,9 +74,28 @@ __pthread_rwlock_rdlock:
tst r2, r2
bf 10f
11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+ xor r0, r5
+ extu.b r5, r5
+#else
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
+# if FUTEX_WAIT != 0
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+# endif
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r0
+ xor r0, r5
+#endif
mov r8, r4
add #READERS_WAKEUP, r4
- mov #FUTEX_WAIT, r5
mov r9, r6
mov #0, r7
mov #SYS_futex, r3
@@ -123,15 +142,22 @@ __pthread_rwlock_rdlock:
rts
mov r3, r0
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
+
1:
mov r8, r5
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait0, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait0b:
bra 2b
nop
@@ -154,6 +180,9 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake0, r1
bsrf r1
nop
@@ -182,6 +211,9 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake1, r1
bsrf r1
nop
@@ -194,23 +226,25 @@ __pthread_rwlock_rdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait1, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait1b:
bra 13b
nop
.align 2
.Lwait0:
- .long __lll_mutex_lock_wait-.Lwait0b
+ .long __lll_lock_wait-.Lwait0b
.Lwake0:
- .long __lll_mutex_unlock_wake-.Lwake0b
+ .long __lll_unlock_wake-.Lwake0b
.Lwait1:
- .long __lll_mutex_lock_wait-.Lwait1b
+ .long __lll_lock_wait-.Lwait1b
.Lwake1:
- .long __lll_mutex_unlock_wake-.Lwake1b
+ .long __lll_unlock_wake-.Lwake1b
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
.globl pthread_rwlock_rdlock
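
The rwlock functions have no mutex to inspect, so they key the choice off the lock's one-byte PSHARED field. The single XOR only works because that byte is pre-encoded at init time: 0 for process-private locks, and for shared locks exactly the flag value the XOR must cancel out (FUTEX_PRIVATE_FLAG, or the TCB copy of it when support is probed at run time). Under that assumed encoding the op selection is, in C:

    #define FUTEX_WAIT          0
    #define FUTEX_PRIVATE_FLAG  128

    extern int tcb_private_futex;   /* 0 or FUTEX_PRIVATE_FLAG, cached at startup */

    static int rwlock_wait_op(unsigned char pshared)
    {
    #ifdef __ASSUME_PRIVATE_FUTEX
        return (pshared ^ FUTEX_PRIVATE_FLAG) | FUTEX_WAIT;   /* constant xor */
    #else
        return (pshared | FUTEX_WAIT) ^ tcb_private_futex;    /* xor the cached word */
    #endif
    }
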
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
index 8a4e7d3bc..6e7af21e9 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedrdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,15 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -58,7 +56,8 @@ pthread_rwlock_timedrdlock:
mov.l @(WRITERS_QUEUED,r8), r0
tst r0, r0
bt 5f
- mov.l @(FLAGS,r8), r0
+ mov #FLAGS, r0
+ mov.b @(r0,r8), r0
tst r0, r0
bt 5f
3:
@@ -88,7 +87,7 @@ pthread_rwlock_timedrdlock:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -116,7 +115,26 @@ pthread_rwlock_timedrdlock:
/* Futex call. */
mov r15, r7
- mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+ xor r0, r5
+ extu.b r5, r5
+#else
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
+# if FUTEX_WAIT != 0
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+# endif
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r0
+ xor r0, r5
+#endif
mov r10, r6
mov r8, r4
add #READERS_WAKEUP, r4
@@ -175,6 +193,10 @@ pthread_rwlock_timedrdlock:
rts
mov r3, r0
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.align 2
.L1k0:
.long 1000
@@ -186,10 +208,12 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait2, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait2b:
bra 2b
nop
@@ -208,16 +232,20 @@ pthread_rwlock_timedrdlock:
.word TID - TLS_PRE_TCB_SIZE
6:
+ mov r3, r10
mov r8, r4
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake2, r1
bsrf r1
nop
.Lwake2b:
bra 7b
- mov #0, r3
+ mov r10, r3
8:
/* Overflow. */
@@ -240,6 +268,9 @@ pthread_rwlock_timedrdlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake3, r1
bsrf r1
nop
@@ -248,17 +279,20 @@ pthread_rwlock_timedrdlock:
nop
12:
+ mov r3, r10
mov r8, r5
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait3, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait3b:
bra 13b
- nop
+ mov r10, r3
16:
bra 17b
@@ -270,11 +304,11 @@ pthread_rwlock_timedrdlock:
.align 2
.Lwait2:
- .long __lll_mutex_lock_wait-.Lwait2b
+ .long __lll_lock_wait-.Lwait2b
.Lwake2:
- .long __lll_mutex_unlock_wake-.Lwake2b
+ .long __lll_unlock_wake-.Lwake2b
.Lwait3:
- .long __lll_mutex_lock_wait-.Lwait3b
+ .long __lll_lock_wait-.Lwait3b
.Lwake3:
- .long __lll_mutex_unlock_wake-.Lwake3b
+ .long __lll_unlock_wake-.Lwake3b
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
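
Besides the private-futex plumbing, the timed rwlock paths fix a small bug: the pending status (for instance ETIMEDOUT) is now parked in the callee-saved r10 across the __lll_unlock_wake call instead of being reset to 0 afterwards; pthread_rwlock_timedwrlock below gets the same fix. In C the shape is simply this, with an assumed helper name:

    extern void lll_unlock_wake_stub(int *lock, int private_flag);   /* assumed helper */

    static int timed_lock_exit(int *lock, int private_flag, int pending_status)
    {
        /* pending_status may be 0 or ETIMEDOUT; the helper call must not clobber
           it, which is what saving it in a callee-saved register guarantees.  */
        lll_unlock_wake_stub(lock, private_flag);
        return pending_status;
    }
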
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
index 6284140a6..1cb7cbdde 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_timedwrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,15 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -85,7 +83,7 @@ pthread_rwlock_timedwrlock:
/* Get current time. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -113,7 +111,26 @@ pthread_rwlock_timedwrlock:
/* Futex call. */
mov r15, r7
- mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+ xor r0, r5
+ extu.b r5, r5
+#else
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
+# if FUTEX_WAIT != 0
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+# endif
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r0
+ xor r0, r5
+#endif
mov r10, r6
mov r8, r4
add #WRITERS_WAKEUP, r4
@@ -174,6 +191,10 @@ pthread_rwlock_timedwrlock:
rts
mov r3, r0
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.L1k1:
.word 1000
.align 2
@@ -185,10 +206,12 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait6, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait6b:
bra 2b
nop
@@ -202,16 +225,20 @@ pthread_rwlock_timedwrlock:
bra 9b
mov #EDEADLK, r3
6:
+ mov r3, r10
mov r8, r4
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake6, r1
bsrf r1
nop
.Lwake6b:
bra 7b
- mov #0, r3
+ mov r10, r3
.Ltidoff:
.word TID - TLS_PRE_TCB_SIZE
@@ -229,6 +256,9 @@ pthread_rwlock_timedwrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake7, r1
bsrf r1
nop
@@ -237,17 +267,20 @@ pthread_rwlock_timedwrlock:
nop
12:
+ mov r3, r10
mov r8, r5
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait7, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait7b:
bra 13b
- nop
+ mov r10, r3
16:
bra 17b
@@ -255,11 +288,11 @@ pthread_rwlock_timedwrlock:
.align 2
.Lwait6:
- .long __lll_mutex_lock_wait-.Lwait6b
+ .long __lll_lock_wait-.Lwait6b
.Lwake6:
- .long __lll_mutex_unlock_wake-.Lwake6b
+ .long __lll_unlock_wake-.Lwake6b
.Lwait7:
- .long __lll_mutex_lock_wait-.Lwait7b
+ .long __lll_lock_wait-.Lwait7b
.Lwake7:
- .long __lll_mutex_unlock_wake-.Lwake7b
+ .long __lll_unlock_wake-.Lwake7b
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
index 74f32f8f9..239090b20 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_unlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,12 +17,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -86,7 +85,24 @@ __pthread_rwlock_unlock:
bf 7f
8:
- mov #FUTEX_WAKE, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAKE), r0
+ xor r0, r5
+ extu.b r5, r5
+#else
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
+ mov #FUTEX_WAKE, r0
+ or r0, r5
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r0
+ xor r0, r5
+#endif
mov #SYS_futex, r3
mov #0, r7
extu.b r3, r3
@@ -118,10 +134,12 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait8, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait8b:
bra 2b
nop
@@ -130,6 +148,9 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake8, r1
bsrf r1
nop
@@ -144,6 +165,9 @@ __pthread_rwlock_unlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake9, r1
bsrf r1
nop
@@ -153,13 +177,17 @@ __pthread_rwlock_unlock:
bra 8b
mov.l @r15+, r4
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.align 2
.Lwait8:
- .long __lll_mutex_lock_wait-.Lwait8b
+ .long __lll_lock_wait-.Lwait8b
.Lwake8:
- .long __lll_mutex_unlock_wake-.Lwake8b
+ .long __lll_unlock_wake-.Lwake8b
.Lwake9:
- .long __lll_mutex_unlock_wake-.Lwake9b
+ .long __lll_unlock_wake-.Lwake9b
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
index d071f7f03..3d37fb486 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/pthread_rwlock_wrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,14 +17,13 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
#include "lowlevel-atomic.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
@@ -73,7 +72,26 @@ __pthread_rwlock_wrlock:
11:
mov r8, r4
add #WRITERS_WAKEUP, r4
- mov #FUTEX_WAIT, r5
+#ifdef __ASSUME_PRIVATE_FUTEX
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ mov #(FUTEX_PRIVATE_FLAG|FUTEX_WAIT), r0
+ xor r0, r5
+ extu.b r5, r5
+#else
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
+# if FUTEX_WAIT != 0
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+# endif
+ stc gbr, r1
+ mov.w .Lpfoff, r2
+ add r2, r1
+ mov.l @r1, r0
+ xor r0, r5
+#endif
mov r9, r6
mov #0, r7
mov #SYS_futex, r3
@@ -123,10 +141,12 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait4, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait4b:
bra 2b
nop
@@ -144,6 +164,9 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake4, r1
bsrf r1
nop
@@ -151,6 +174,10 @@ __pthread_rwlock_wrlock:
bra 7b
mov #0, r3
+#ifndef __ASSUME_PRIVATE_FUTEX
+.Lpfoff:
+ .word PRIVATE_FUTEX - TLS_PRE_TCB_SIZE
+#endif
.Ltidoff:
.word TID - TLS_PRE_TCB_SIZE
@@ -166,6 +193,9 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r4
#endif
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r5
+ extu.b r5, r5
mov.l .Lwake5, r1
bsrf r1
nop
@@ -178,23 +208,25 @@ __pthread_rwlock_wrlock:
#if MUTEX != 0
add #MUTEX, r5
#endif
- mov r2, r4
+ mov #PSHARED, r0
+ mov.b @(r0,r8), r6
+ extu.b r6, r6
mov.l .Lwait5, r1
bsrf r1
- nop
+ mov r2, r4
.Lwait5b:
bra 13b
nop
.align 2
.Lwait4:
- .long __lll_mutex_lock_wait-.Lwait4b
+ .long __lll_lock_wait-.Lwait4b
.Lwake4:
- .long __lll_mutex_unlock_wake-.Lwake4b
+ .long __lll_unlock_wake-.Lwake4b
.Lwait5:
- .long __lll_mutex_lock_wait-.Lwait5b
+ .long __lll_lock_wait-.Lwait5b
.Lwake5:
- .long __lll_mutex_unlock_wake-.Lwake5b
+ .long __lll_unlock_wake-.Lwake5b
.globl pthread_rwlock_wrlock
pthread_rwlock_wrlock = __pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S
index 9755b7e16..f71cd930d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_post.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,22 +18,34 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
.text
.globl __new_sem_post
.type __new_sem_post,@function
.align 5
__new_sem_post:
- INC (@r4, r6)
-
+ mov.l @(VALUE,r4), r2
+0:
+ mov.l .Lmax, r1
+ cmp/eq r1, r2
+ bt/s 3f
+ mov r2, r3
+ mov r3, r5
+ add #1, r5
+ CMPXCHG (r3, @(VALUE,r4), r5, r2)
+ bf 0b
+ mov.l @(NWAITERS,r4), r2
+ tst r2, r2
+ bt 2f
mov #FUTEX_WAKE, r5
+ mov.l @(PRIVATE,r4), r1
+ or r1, r5
+ mov #1, r6
mov #0, r7
mov #SYS_futex, r3
extu.b r3, r3
@@ -42,11 +54,20 @@ __new_sem_post:
cmp/pz r0
bf 1f
+2:
rts
mov #0, r0
1:
- mov #EINVAL, r2
+ bra 4f
+ mov #EINVAL, r2
+
+3:
+ mov #EOVERFLOW, r2
+4:
+ mov.l r12, @-r15
+ mov.l r8, @-r15
+ sts.l pr, @-r15
mova .Lgot3, r0
mov.l .Lgot3, r12
add r0, r12
@@ -55,25 +76,30 @@ __new_sem_post:
mov.l .Lerrno3, r0
stc gbr, r1
mov.l @(r0, r12), r0
- bra .Lexit
- add r1, r0
- .align 2
+ bra .Lexit
+ add r1, r0
+ .align 2
.Lerrno3:
.long errno@GOTTPOFF
.Lexit:
+ mov.l r2, @r0
#else
+ mov r2, r8
mov.l .Lerrloc3, r1
bsrf r1
nop
.Lerrloc3b:
+ mov.l r8, @r0
#endif
- mov.l r2, @r0
lds.l @r15+, pr
+ mov.l @r15+, r8
mov.l @r15+, r12
rts
mov #-1, r0
.align 2
+.Lmax:
+ .long SEM_VALUE_MAX
.Lgot3:
.long _GLOBAL_OFFSET_TABLE_
#if !USE___THREAD
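
__new_sem_post no longer does a blind INC: it CAS-loops on VALUE so it can refuse to overflow past SEM_VALUE_MAX (returning EOVERFLOW), and it only issues the FUTEX_WAKE when NWAITERS says someone may be sleeping. The same flow in C, with an assumed futex wrapper and a struct layout implied by the offsets above:

    #include <errno.h>
    #include <limits.h>
    #include <stdatomic.h>

    #define FUTEX_WAKE 1

    struct new_sem_sketch {
        atomic_uint value;
        int private_op;           /* 0 or FUTEX_PRIVATE_FLAG, set at sem_init time */
        unsigned long nwaiters;
    };

    extern int futex(atomic_uint *uaddr, int op, int val);  /* assumed syscall wrapper */

    int sem_post_sketch(struct new_sem_sketch *sem)
    {
        unsigned int cur = atomic_load(&sem->value);
        do {
            if (cur == SEM_VALUE_MAX) {
                errno = EOVERFLOW;                 /* the new 3:/EOVERFLOW path */
                return -1;
            }
        } while (!atomic_compare_exchange_weak(&sem->value, &cur, cur + 1));

        if (sem->nwaiters != 0
            && futex(&sem->value, FUTEX_WAKE | sem->private_op, 1) < 0) {
            errno = EINVAL;                        /* the pre-existing error path */
            return -1;
        }
        return 0;
    }
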
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
index 40782fcaf..774442f23 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,31 +19,22 @@
#include <sysdep.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
.globl sem_timedwait
.type sem_timedwait,@function
.align 5
- cfi_startproc
sem_timedwait:
- /* First check for cancellation. */
- stc gbr, r0
- mov.w .Lchand, r1
- mov.l @(r0,r1), r0
- mov #0xf9, r1
- and r1, r0
- cmp/eq #8, r0
- bf 0f
- bra 10f
- stc gbr, r0
-0:
+.LSTARTCODE:
mov.l @r4, r0
2:
tst r0, r0
@@ -60,22 +51,17 @@ sem_timedwait:
1:
/* Check whether the timeout value is valid. */
mov.l r8, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r8, 0)
+.Lpush_r8:
mov.l r9, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r9, 0)
+.Lpush_r9:
mov.l r10, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r10, 0)
+.Lpush_r10:
mov.l r12, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r12, 0)
+.Lpush_r12:
sts.l pr, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (pr, 0)
+.Lpush_pr:
add #-8, r15
- cfi_adjust_cfa_offset(8)
+.Lalloc:
mov r4, r8
mov r5, r9
@@ -85,17 +71,13 @@ sem_timedwait:
cmp/hs r1, r0
bt/s 6f
mov #EINVAL, r0
-7:
- mov.l .Lenable0, r1
- bsrf r1
- nop
-.Lenable0b:
- mov r0, r10
+ INC (@(NWAITERS,r8),r2)
+7:
/* Compute relative timeout. */
mov r15, r4
mov #0, r5
- mov #SYS_gettimeofday, r3
+ mov #__NR_gettimeofday, r3
trapa #0x12
SYSCALL_INST_PAD
@@ -116,15 +98,27 @@ sem_timedwait:
5:
cmp/pz r2
bf/s 6f /* Time is already up. */
- mov #ETIMEDOUT, r0
+ mov #ETIMEDOUT, r0
/* Store relative timeout. */
mov.l r2, @r15
mov.l r3, @(4,r15)
- /* Futex call. */
+.LcleanupSTART:
+ mov.l .Lenable0, r1
+ bsrf r1
+ nop
+.Lenable0b:
+ mov r0, r10
+
mov r8, r4
- mov #FUTEX_WAIT, r5
+#if FUTEX_WAIT == 0
+ mov.l @(PRIVATE,r8), r5
+#else
+ mov.l @(PRIVATE,r8), r5
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+#endif
mov #0, r6
mov r15, r7
mov #SYS_futex, r3
@@ -138,6 +132,7 @@ sem_timedwait:
mov r0, r10
.Ldisable0b:
mov r10, r0
+.LcleanupEND:
tst r0, r0
bt 9f
@@ -156,6 +151,10 @@ sem_timedwait:
bf/s 8b
mov r2, r0
+ DEC (@(NWAITERS,r8), r2)
+ mov #0, r0
+
+10:
add #8, r15
lds.l @r15+, pr
mov.l @r15+, r12
@@ -163,12 +162,12 @@ sem_timedwait:
mov.l @r15+, r9
mov.l @r15+, r8
rts
- mov #0, r0
+ nop
3:
neg r0, r0
6:
- mov r0, r8
+ mov r0, r10
mova .Lgot2, r0
mov.l .Lgot2, r12
add r0, r12
@@ -177,11 +176,11 @@ sem_timedwait:
mov.l .Lerrno2, r0
stc gbr, r1
mov.l @(r0, r12), r0
- bra .Lexit
- add r1, r0
- .align 2
+ bra .Lexit
+ add r1, r0
+ .align 2
.Lerrno2:
- .long errno@GOTTPOFF
+ .long errno@GOTTPOFF
.Lexit:
#else
mov.l .Lerrloc2, r1
@@ -189,39 +188,13 @@ sem_timedwait:
nop
.Lerrloc2b:
#endif
- mov.l r8, @r0
- add #8, r15
- lds.l @r15+, pr
- mov.l @r15+, r12
- mov.l @r15+, r10
- mov.l @r15+, r9
- mov.l @r15+, r8
- rts
+ mov.l r10, @r0
+ DEC (@(NWAITERS,r8), r2)
+ bra 10b
mov #-1, r0
-10:
- /* Canceled. */
- mov.w .Lresult, r1
- mov #-1, r2
- mov.l r2, @(r0,r1)
- mov.w .Lchand, r0
- or.b #0x10, @(r0,gbr)
- stc gbr, r0
- mov.w .Lclbuf, r1
- mov.l .Lunwind, r2
- braf r2
- mov.l @(r0,r1), r4
-.Lunwindb:
- cfi_endproc
-
.L1k:
.word 1000
-.Lchand:
- .word CANCELHANDLING - TLS_PRE_TCB_SIZE
-.Lresult:
- .word RESULT - TLS_PRE_TCB_SIZE
-.Lclbuf:
- .word CLEANUP_JMP_BUF - TLS_PRE_TCB_SIZE
.align 2
.L1g:
.long 1000000000
@@ -235,6 +208,151 @@ sem_timedwait:
.long __pthread_enable_asynccancel-.Lenable0b
.Ldisable0:
.long __pthread_disable_asynccancel-.Ldisable0b
-.Lunwind:
- .long HIDDEN_JUMPTARGET (__pthread_unwind)-.Lunwindb
.size sem_timedwait,.-sem_timedwait
+
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+ mov.l .Lresume, r1
+#ifdef PIC
+ add r12, r1
+#endif
+ jsr @r1
+ nop
+ sleep
+
+ .align 2
+.Lresume:
+#ifdef PIC
+ .long _Unwind_Resume@GOTOFF
+#else
+ .long _Unwind_Resume
+#endif
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff ! @LPStart format (omit)
+ .byte 0xff ! @TType format (omit)
+ .byte 0x01 ! call-site format
+ ! DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .ualong .LENDCIE-.LSTARTCIE ! Length of the CIE.
+.LSTARTCIE:
+ .ualong 0 ! CIE ID.
+ .byte 1 ! Version number.
+#ifdef SHARED
+ .string "zPLR" ! NUL-terminated augmentation
+ ! string.
+#else
+ .string "zPL" ! NUL-terminated augmentation
+ ! string.
+#endif
+ .uleb128 1 ! Code alignment factor.
+ .sleb128 -4 ! Data alignment factor.
+ .byte 0x11 ! Return address register
+ ! column.
+#ifdef SHARED
+ .uleb128 7 ! Augmentation value length.
+ .byte 0x9b ! Personality: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4
+ ! + DW_EH_PE_indirect
+ .ualong DW.ref.__gcc_personality_v0-.
+ .byte 0x1b ! LSDA Encoding: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4.
+ .byte 0x1b ! FDE Encoding: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 ! Augmentation value length.
+ .byte 0x0 ! Personality: absolute
+ .ualong __gcc_personality_v0
+ .byte 0x0 ! LSDA Encoding: absolute
+#endif
+ .byte 0x0c ! DW_CFA_def_cfa
+ .uleb128 0xf
+ .uleb128 0
+ .align 4
+.LENDCIE:
+
+ .ualong .LENDFDE-.LSTARTFDE ! Length of the FDE.
+.LSTARTFDE:
+ .ualong .LSTARTFDE-.LSTARTFRAME ! CIE pointer.
+#ifdef SHARED
+ .ualong .LSTARTCODE-. ! PC-relative start address
+ ! of the code.
+#else
+ .ualong .LSTARTCODE ! Start address of the code.
+#endif
+ .ualong .LENDCODE-.LSTARTCODE ! Length of the code.
+ .uleb128 4 ! Augmentation size
+#ifdef SHARED
+ .ualong .LexceptSTART-.
+#else
+ .ualong .LexceptSTART
+#endif
+
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r8-.LSTARTCODE
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0x88 ! DW_CFA_offset r8
+ .uleb128 1
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r9-.Lpush_r8
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x89 ! DW_CFA_offset r9
+ .uleb128 2
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r10-.Lpush_r9
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x8a ! DW_CFA_offset r10
+ .uleb128 3
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r12-.Lpush_r10
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x8c ! DW_CFA_offset r12
+ .uleb128 4
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_pr-.Lpush_r12
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 20
+ .byte 0x91 ! DW_CFA_offset pr
+ .uleb128 5
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lalloc-.Lpush_pr
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 28
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S
index 00c61f3bb..b46eb1a56 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_trywait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +18,7 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
@@ -59,9 +60,9 @@ __new_sem_trywait:
mov.l .Lerrno1, r0
stc gbr, r1
mov.l @(r0, r12), r0
- bra .Lexit
- add r1, r0
- .align 2
+ bra .Lexit
+ add r1, r0
+ .align 2
.Lerrno1:
.long errno@GOTTPOFF
.Lexit:
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S
index 7d13fa130..00a125bc5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sem_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -19,43 +19,32 @@
#include <sysdep.h>
#include <pthread-errnos.h>
#include <tcb-offsets.h>
+#include <structsem.h>
+#include <lowlevellock.h>
#include "lowlevel-atomic.h"
-#define SYS_gettimeofday __NR_gettimeofday
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#if VALUE != 0
+# error "code needs to be rewritten for VALUE != 0"
+#endif
.text
.globl __new_sem_wait
.type __new_sem_wait,@function
.align 5
- cfi_startproc
__new_sem_wait:
- /* First check for cancellation. */
- stc gbr, r0
- mov.w .Lchand, r1
- mov.l @(r0,r1), r0
- mov #0xf9, r1
- and r1, r0
- cmp/eq #8, r0
- bt 5f
-
+.LSTARTCODE:
mov.l r8, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r8, 0)
+.Lpush_r8:
mov.l r10, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r10, 0)
+.Lpush_r10:
mov.l r12, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (r12, 0)
+.Lpush_r12:
sts.l pr, @-r15
- cfi_adjust_cfa_offset(4)
- cfi_rel_offset (pr, 0)
+.Lpush_pr:
mov r4, r8
-3:
+
mov.l @r8, r0
2:
tst r0, r0
@@ -66,10 +55,21 @@ __new_sem_wait:
CMPXCHG (r4, @r8, r3, r2)
bf/s 2b
mov r2, r0
- bra 9f
- mov #0, r0
+7:
+ mov #0, r0
+9:
+ lds.l @r15+, pr
+ mov.l @r15+, r12
+ mov.l @r15+, r10
+ rts
+ mov.l @r15+, r8
+.Lafter_ret:
1:
+ INC (@(NWAITERS,r8),r2)
+
+.LcleanupSTART:
+6:
mov.l .Lenable0, r1
bsrf r1
nop
@@ -77,7 +77,13 @@ __new_sem_wait:
mov r0, r10
mov r8, r4
- mov #FUTEX_WAIT, r5
+#if FUTEX_WAIT == 0
+ mov.l @(PRIVATE,r8), r5
+#else
+ mov.l @(PRIVATE,r8), r5
+ mov #FUTEX_WAIT, r0
+ or r0, r5
+#endif
mov #0, r6
mov #0, r7
mov #SYS_futex, r3
@@ -91,14 +97,35 @@ __new_sem_wait:
mov r0, r10
.Ldisable0b:
mov r10, r0
+.LcleanupEND:
tst r0, r0
- bt 3b
+ bt 3f
cmp/eq #-EWOULDBLOCK, r0
- bt 3b
- neg r0, r0
+ bf 4f
+
+3:
+ mov.l @r8, r0
+5:
+ tst r0, r0
+ bt 6b
+
+ mov r0, r3
+ mov r0, r4
+ add #-1, r3
+ CMPXCHG (r4, @r8, r3, r2)
+ bf/s 5b
+ mov r2, r0
+
+ DEC (@(NWAITERS,r8), r2)
+ bra 7b
+ nop
- mov r0, r8
+4:
+ neg r0, r0
+ mov r0, r4
+ DEC (@(NWAITERS,r8), r2)
+ mov r4, r8
mova .Lgot0, r0
mov.l .Lgot0, r12
add r0, r12
@@ -107,9 +134,9 @@ __new_sem_wait:
mov.l .Lerrno0, r0
stc gbr, r1
mov.l @(r0, r12), r0
- bra .Lexit
- add r1, r0
- .align 2
+ bra .Lexit
+ add r1, r0
+ .align 2
.Lerrno0:
.long errno@GOTTPOFF
.Lexit:
@@ -120,35 +147,9 @@ __new_sem_wait:
.Lerrloc0b:
#endif
mov.l r8, @r0
- mov #-1, r0
-9:
- lds.l @r15+, pr
- mov.l @r15+, r12
- mov.l @r15+, r10
- rts
- mov.l @r15+, r8
-5:
- /* Canceled. */
- stc gbr, r0
- mov.w .Lresult, r1
- mov #-1, r2
- mov.l r2, @(r0,r1)
- mov.w .Lchand, r0
- or.b #0x10, @(r0,gbr)
- stc gbr, r0
- mov.w .Lclbuf, r1
- mov.l .Lunwind, r2
- braf r2
- mov.l @(r0,r1), r4
-.Lunwindb:
- cfi_endproc
-
-.Lchand:
- .word CANCELHANDLING - TLS_PRE_TCB_SIZE
-.Lresult:
- .word RESULT - TLS_PRE_TCB_SIZE
-.Lclbuf:
- .word CLEANUP_JMP_BUF - TLS_PRE_TCB_SIZE
+ bra 9b
+ mov #-1, r0
+
.align 2
.Lgot0:
.long _GLOBAL_OFFSET_TABLE_
@@ -160,7 +161,143 @@ __new_sem_wait:
.long __pthread_enable_asynccancel-.Lenable0b
.Ldisable0:
.long __pthread_disable_asynccancel-.Ldisable0b
-.Lunwind:
- .long HIDDEN_JUMPTARGET (__pthread_unwind)-.Lunwindb
.size __new_sem_wait,.-__new_sem_wait
weak_alias(__new_sem_wait, sem_wait)
+
+
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ DEC (@(NWAITERS,r8), r2)
+.LcallUR:
+ mov.l .Lresume, r1
+#ifdef __PIC__
+ add r12, r1
+#endif
+ jsr @r1
+ nop
+ sleep
+
+ .align 2
+.Lresume:
+#ifdef __PIC__
+ .long _Unwind_Resume@GOTOFF
+#else
+ .long _Unwind_Resume
+#endif
+.LENDCODE:
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte 0xff ! @LPStart format (omit)
+ .byte 0xff ! @TType format (omit)
+ .byte 0x01 ! call-site format
+ ! DW_EH_PE_uleb128
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+ .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+ .ualong .LENDCIE-.LSTARTCIE ! Length of the CIE.
+.LSTARTCIE:
+ .ualong 0 ! CIE ID.
+ .byte 1 ! Version number.
+#ifdef SHARED
+ .string "zPLR" ! NUL-terminated augmentation
+ ! string.
+#else
+ .string "zPL" ! NUL-terminated augmentation
+ ! string.
+#endif
+ .uleb128 1 ! Code alignment factor.
+ .sleb128 -4 ! Data alignment factor.
+ .byte 0x11 ! Return address register
+ ! column.
+#ifdef SHARED
+ .uleb128 7 ! Augmentation value length.
+ .byte 0x9b ! Personality: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4
+ ! + DW_EH_PE_indirect
+ .ualong DW.ref.__gcc_personality_v0-.
+ .byte 0x1b ! LSDA Encoding: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4.
+ .byte 0x1b ! FDE Encoding: DW_EH_PE_pcrel
+ ! + DW_EH_PE_sdata4.
+#else
+ .uleb128 6 ! Augmentation value length.
+ .byte 0x0 ! Personality: absolute
+ .ualong __gcc_personality_v0
+ .byte 0x0 ! LSDA Encoding: absolute
+#endif
+ .byte 0x0c ! DW_CFA_def_cfa
+ .uleb128 0xf
+ .uleb128 0
+ .align 4
+.LENDCIE:
+
+ .ualong .LENDFDE-.LSTARTFDE ! Length of the FDE.
+.LSTARTFDE:
+ .ualong .LSTARTFDE-.LSTARTFRAME ! CIE pointer.
+#ifdef SHARED
+ .ualong .LSTARTCODE-. ! PC-relative start address
+ ! of the code.
+#else
+ .ualong .LSTARTCODE ! Start address of the code.
+#endif
+ .ualong .LENDCODE-.LSTARTCODE ! Length of the code.
+ .uleb128 4 ! Augmentation size
+#ifdef SHARED
+ .ualong .LexceptSTART-.
+#else
+ .ualong .LexceptSTART
+#endif
+
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r8-.LSTARTCODE
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 4
+ .byte 0x88 ! DW_CFA_offset r8
+ .uleb128 1
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r10-.Lpush_r8
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 8
+ .byte 0x8a ! DW_CFA_offset r10
+ .uleb128 2
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_r12-.Lpush_r10
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 12
+ .byte 0x8c ! DW_CFA_offset r12
+ .uleb128 3
+ .byte 4 ! DW_CFA_advance_loc4
+ .ualong .Lpush_pr-.Lpush_r12
+ .byte 14 ! DW_CFA_def_cfa_offset
+ .uleb128 16
+ .byte 0x91 ! DW_CFA_offset pr
+ .uleb128 4
+ .align 4
+.LENDFDE:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 4
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 4
+DW.ref.__gcc_personality_v0:
+ .long __gcc_personality_v0
+#endif
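
sem_wait (like sem_timedwait above) drops the hand-rolled "first check for cancellation" prologue and the direct __pthread_unwind jump. Instead the waiter counts itself in NWAITERS, marks the blocking region in .gcc_except_table, and lets sem_wait_cleanup undo the count if the thread is cancelled while blocked. In portable terms the structure is roughly the following; names are assumed and the CAS loop that actually grabs the token is omitted:

    #include <pthread.h>
    #include <stdatomic.h>

    extern int futex_wait(atomic_uint *uaddr, unsigned int val, int op_flags); /* assumed */

    static void nwaiters_cleanup(void *arg)          /* plays the role of sem_wait_cleanup */
    {
        atomic_fetch_sub((atomic_ulong *) arg, 1);
    }

    /* Skeleton of the blocking path: the cleanup handler runs if the futex wait
       is interrupted by cancellation, so NWAITERS can never leak.  */
    static void wait_for_token(atomic_uint *value, atomic_ulong *nwaiters, int op_flags)
    {
        atomic_fetch_add(nwaiters, 1);
        pthread_cleanup_push(nwaiters_cleanup, nwaiters);
        while (atomic_load(value) == 0)
            futex_wait(value, 0, op_flags);          /* cancellation point */
        pthread_cleanup_pop(0);
        atomic_fetch_sub(nwaiters, 1);
    }
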
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
index dbaa44385..ad2ca40ac 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -49,27 +49,32 @@
.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel; \
.Lpseudo_cancel: \
sts.l pr,@-r15; \
- .LCFI0: \
+ cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (pr, 0); \
add _IMM16,r15; \
+ cfi_adjust_cfa_offset (16); \
SAVE_ARGS_##args; \
- .LCFI1: \
CENABLE; \
LOAD_ARGS_##args; \
add _IMP16,r15; \
- .LCFI2: \
+ cfi_adjust_cfa_offset (-16); \
lds.l @r15+,pr; \
- .LCFI3: \
+ cfi_adjust_cfa_offset (-4); \
+ cfi_restore (pr); \
DO_CALL(syscall_name, args); \
SYSCALL_INST_PAD; \
sts.l pr,@-r15; \
- .LCFI4: \
+ cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (pr, 0); \
mov.l r0,@-r15; \
- .LCFI5: \
+ cfi_adjust_cfa_offset (4); \
+ cfi_rel_offset (r0, 0); \
CDISABLE; \
mov.l @r15+,r0; \
- .LCFI6: \
+ cfi_adjust_cfa_offset (-4); \
lds.l @r15+,pr; \
- .LCFI7: \
+ cfi_adjust_cfa_offset (-4); \
+ cfi_restore (pr); \
mov r0,r1; \
mov _IMM12,r2; \
shad r2,r1; \
@@ -78,106 +83,17 @@
bf .Lpseudo_end; \
.Lsyscall_error: \
SYSCALL_ERROR_HANDLER; \
- .Lpseudo_end: \
- /* Create unwinding information for the syscall wrapper. */ \
- .section .eh_frame,"a",@progbits; \
- .Lframe1: \
- .ualong .LECIE1-.LSCIE1; \
- .LSCIE1: \
- .ualong 0x0; \
- .byte 0x1; \
- AUGMENTATION_STRING; \
- .uleb128 0x1; \
- .sleb128 -4; \
- .byte 0x11; \
- AUGMENTATION_PARAM; \
- .byte 0xc; \
- .uleb128 0xf; \
- .uleb128 0x0; \
- .align 2; \
- .LECIE1: \
- .LSFDE1: \
- .ualong .LEFDE1-.LASFDE1; \
- .LASFDE1: \
- .ualong .LASFDE1-.Lframe1; \
- START_SYMBOL_REF; \
- .ualong .Lpseudo_end - .Lpseudo_start; \
- AUGMENTATION_PARAM_FDE; \
- .byte 0x4; \
- .ualong .LCFI0-.Lpseudo_start; \
- .byte 0xe; \
- .uleb128 0x4; \
- .byte 0x91; \
- .uleb128 0x1; \
- .byte 0x4; \
- .ualong .LCFI1-.LCFI0; \
- .byte 0xe; \
- .uleb128 0x14; \
- FRAME_REG_##args; \
- .byte 0x4; \
- .ualong .LCFI2-.LCFI1; \
- .byte 0xe; \
- .uleb128 0x4; \
- .byte 0x4; \
- .ualong .LCFI3-.LCFI2; \
- .byte 0xe; \
- .uleb128 0x0; \
- .byte 0xd1; \
- .byte 0x4; \
- .ualong .LCFI4-.LCFI3; \
- .byte 0xe; \
- .uleb128 0x4; \
- .byte 0x91; \
- .uleb128 0x1; \
- .byte 0x4; \
- .ualong .LCFI5-.LCFI4; \
- .byte 0xe; \
- .uleb128 0x8; \
- .byte 0x80; \
- .uleb128 0x2; \
- .byte 0x4; \
- .ualong .LCFI6-.LCFI5; \
- .byte 0xe; \
- .uleb128 0x4; \
- .byte 0xc0; \
- .byte 0x4; \
- .ualong .LCFI7-.LCFI6; \
- .byte 0xe; \
- .uleb128 0x0; \
- .byte 0xd1; \
- .align 2; \
- .LEFDE1: \
- .previous
-
-# ifdef SHARED
-# define AUGMENTATION_STRING .string "zR"
-# define AUGMENTATION_PARAM .uleb128 1; .byte 0x1b
-# define AUGMENTATION_PARAM_FDE .uleb128 0
-# define START_SYMBOL_REF .long .Lpseudo_start-.
-# else
-# define AUGMENTATION_STRING .ascii "\0"
-# define AUGMENTATION_PARAM
-# define AUGMENTATION_PARAM_FDE
-# define START_SYMBOL_REF .long .Lpseudo_start
-# endif
-
-# define FRAME_REG_0 /* Nothing. */
-# define FRAME_REG_1 FRAME_REG_0; .byte 0x84; .uleb128 5
-# define FRAME_REG_2 FRAME_REG_1; .byte 0x85; .uleb128 4
-# define FRAME_REG_3 FRAME_REG_2; .byte 0x86; .uleb128 3
-# define FRAME_REG_4 FRAME_REG_3; .byte 0x87; .uleb128 2
-# define FRAME_REG_5 FRAME_REG_4
-# define FRAME_REG_6 FRAME_REG_5
+ .Lpseudo_end:
# undef PSEUDO_END
# define PSEUDO_END(sym) \
END (sym)
# define SAVE_ARGS_0 /* Nothing. */
-# define SAVE_ARGS_1 SAVE_ARGS_0; mov.l r4,@(0,r15)
-# define SAVE_ARGS_2 SAVE_ARGS_1; mov.l r5,@(4,r15)
-# define SAVE_ARGS_3 SAVE_ARGS_2; mov.l r6,@(8,r15)
-# define SAVE_ARGS_4 SAVE_ARGS_3; mov.l r7,@(12,r15)
+# define SAVE_ARGS_1 SAVE_ARGS_0; mov.l r4,@(0,r15); cfi_offset (r4,-4)
+# define SAVE_ARGS_2 SAVE_ARGS_1; mov.l r5,@(4,r15); cfi_offset (r5,-8)
+# define SAVE_ARGS_3 SAVE_ARGS_2; mov.l r6,@(8,r15); cfi_offset (r6,-12)
+# define SAVE_ARGS_4 SAVE_ARGS_3; mov.l r7,@(12,r15); cfi_offset (r7,-16)
# define SAVE_ARGS_5 SAVE_ARGS_4
# define SAVE_ARGS_6 SAVE_ARGS_5
@@ -245,3 +161,9 @@
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
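
For readers who do not follow SH assembly, the control flow that the PSEUDO macro above implements (and that the new cfi_* annotations now describe) boils down to the C sketch below. The names multiple_threads, enable_asynccancel and disable_asynccancel are placeholders for the real THREAD_GETMEM/CENABLE/CDISABLE expansions, not uClibc symbols.

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Stand-ins for the TCB flag and the CENABLE/CDISABLE hooks
       (THREAD_GETMEM (THREAD_SELF, header.multiple_threads) and
       __pthread_{enable,disable}_asynccancel).  */
    static int multiple_threads;
    static int enable_asynccancel (void) { return 0; }
    static void disable_asynccancel (int oldtype) { (void) oldtype; }

    /* Rough C equivalent of the cancellable wrapper PSEUDO expands to
       for a three-argument syscall.  */
    static long
    cancellable_syscall3 (long nr, long a0, long a1, long a2)
    {
      if (!multiple_threads)
        /* Single-threaded process: plain syscall, no cancellation window.  */
        return syscall (nr, a0, a1, a2);

      int oldtype = enable_asynccancel ();   /* open the cancellation window */
      long ret = syscall (nr, a0, a1, a2);   /* the actual DO_CALL */
      disable_asynccancel (oldtype);         /* close it again */
      return ret;                            /* errno already set by syscall() */
    }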
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S
index a45c09fd6..5433eacbe 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/vfork.S
@@ -66,6 +66,6 @@ ENTRY (__vfork)
.word PID - TLS_PRE_TCB_SIZE
.align 2
PSEUDO_END (__vfork)
-hidden_def (vfork)
+libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/smp.h b/libpthread/nptl/sysdeps/unix/sysv/linux/smp.h
index 41fa97ff4..fcc34f768 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/smp.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/smp.h
@@ -1,5 +1,5 @@
/* Determine whether the host has multiple processors. Linux version.
- Copyright (C) 1996, 2002, 2004 Free Software Foundation, Inc.
+ Copyright (C) 1996, 2002, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -17,36 +17,12 @@
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include <errno.h>
-#include <fcntl.h>
-#include <string.h>
-#include <sys/sysctl.h>
-#include <not-cancel.h>
-
/* Test whether the machine has more than one processor. This is not the
best test but good enough. More complicated tests would require `malloc'
which is not available at that time. */
static inline int
is_smp_system (void)
{
- static const int sysctl_args[] = { CTL_KERN, KERN_VERSION };
- char buf[512];
- size_t reslen = sizeof (buf);
-
- /* Try reading the number using `sysctl' first. */
- if (sysctl ((int *) sysctl_args,
- sizeof (sysctl_args) / sizeof (sysctl_args[0]),
- buf, &reslen, NULL, 0) < 0)
- {
- /* This was not successful. Now try reading the /proc filesystem. */
- int fd = open_not_cancel_2 ("/proc/sys/kernel/version", O_RDONLY);
- if (__builtin_expect (fd, 0) == -1
- || (reslen = read_not_cancel (fd, buf, sizeof (buf))) <= 0)
- /* This also didn't work. We give up and say it's a UP machine. */
- buf[0] = '\0';
-
- close_not_cancel_no_status (fd);
- }
-
- return strstr (buf, "SMP") != NULL;
+ /* Assume all machines are SMP and/or CMT and/or SMT. */
+ return 1;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch
index b54731075..b6df6dcc1 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Makefile.arch
@@ -6,7 +6,8 @@
#
libpthread_SSRC = pt-vfork.S clone.S
-libpthread_CSRC = pthread_once.c lowlevellock.c
+libpthread_CSRC = pthread_once.c lowlevellock.c \
+	pthread_barrier_init.c pthread_barrier_wait.c pthread_barrier_destroy.c
libc_a_CSRC = fork.c libc-lowlevellock.c
libc_a_SSRC = clone.S vfork.S
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions
deleted file mode 100644
index d10277248..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/Versions
+++ /dev/null
@@ -1,6 +0,0 @@
-libpthread {
- GLIBC_2.3.3 {
- # Changed PTHREAD_STACK_MIN.
- pthread_attr_setstack; pthread_attr_setstacksize;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h
index e082ea8f0..6e356031d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/local_lim.h
@@ -1,5 +1,5 @@
/* Minimum guaranteed maximum values for system limits. Linux/SPARC version.
- Copyright (C) 1993-1998,2000,2002,2003,2004 Free Software Foundation, Inc.
+ Copyright (C) 1993-1998,2000,2002-2004,2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
@@ -31,6 +31,9 @@
#ifndef OPEN_MAX
# define __undef_OPEN_MAX
#endif
+#ifndef ARG_MAX
+# define __undef_ARG_MAX
+#endif
/* The kernel sources contain a file with all the needed information. */
#include <linux/limits.h>
@@ -50,6 +53,11 @@
# undef OPEN_MAX
# undef __undef_OPEN_MAX
#endif
+/* Have to remove ARG_MAX? */
+#ifdef __undef_ARG_MAX
+# undef ARG_MAX
+# undef __undef_ARG_MAX
+#endif
/* The number of data keys per process. */
#define _POSIX_THREAD_KEYS_MAX 128
@@ -87,3 +95,6 @@
/* Maximum message queue priority level. */
#define MQ_PRIO_MAX 32768
+
+/* Maximum value the semaphore can have. */
+#define SEM_VALUE_MAX (2147483647)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
index 459d1ca79..faf058486 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
@@ -1,5 +1,5 @@
/* Machine-specific pthread type layouts. SPARC version.
- Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -90,7 +90,7 @@ typedef union
#if __WORDSIZE == 64
int __spins;
__pthread_list_t __list;
-# define __PTHREAD_MUTEX_HAVE_PREV 1
+# define __PTHREAD_MUTEX_HAVE_PREV 1
#else
unsigned int __nusers;
__extension__ union
@@ -160,9 +160,9 @@ typedef union
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
int __writer;
- int __pad1;
+ int __shared;
+ unsigned long int __pad1;
unsigned long int __pad2;
- unsigned long int __pad3;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
unsigned int __flags;
@@ -176,9 +176,12 @@ typedef union
unsigned int __writer_wakeup;
unsigned int __nr_readers_queued;
unsigned int __nr_writers_queued;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __shared;
/* FLAGS must stay at this position in the structure to maintain
binary compatibility. */
- unsigned int __flags;
+ unsigned char __flags;
int __writer;
} __data;
# endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h
index 7f3a32832..8fd7d344e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/bits/semaphore.h
@@ -33,9 +33,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h
new file mode 100644
index 000000000..4f400a3fe
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/internaltypes.h
@@ -0,0 +1,34 @@
+#ifndef _INTERNALTYPES_H
+#include "../internaltypes.h"
+
+union sparc_pthread_barrier
+{
+ struct pthread_barrier b;
+ struct sparc_pthread_barrier_s
+ {
+ unsigned int curr_event;
+ int lock;
+ unsigned int left;
+ unsigned int init_count;
+ unsigned char left_lock;
+ unsigned char pshared;
+ } s;
+};
+
+struct sparc_new_sem
+{
+ unsigned int value;
+ unsigned char lock;
+ unsigned char private;
+ unsigned char pad[2];
+ unsigned long int nwaiters;
+};
+
+struct sparc_old_sem
+{
+ unsigned int value;
+ unsigned char lock;
+ unsigned char private;
+};
+
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c
index e9ec4df1e..80b0e7663 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.c
@@ -24,23 +24,36 @@
#include <sys/time.h>
-/* These functions don't get included in libc.so */
void
-__lll_lock_wait (int *futex)
+__lll_lock_wait_private (int *futex)
{
do
{
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_wait (futex, 2);
+ lll_futex_wait (futex, 2, LLL_PRIVATE);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
}
+/* These functions don't get included in libc.so */
#ifdef IS_IN_libpthread
+void
+__lll_lock_wait (int *futex, int private)
+{
+ do
+ {
+ int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
+ if (oldval != 0)
+ lll_futex_wait (futex, 2, private);
+ }
+ while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
+}
+
+
int
-__lll_timedlock_wait (int *futex, const struct timespec *abstime)
+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private)
{
/* Reject invalid timeouts. */
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
@@ -70,25 +83,13 @@ __lll_timedlock_wait (int *futex, const struct timespec *abstime)
/* Wait. */
int oldval = atomic_compare_and_exchange_val_24_acq (futex, 2, 1);
if (oldval != 0)
- lll_futex_timed_wait (futex, 2, &rt);
+ lll_futex_timed_wait (futex, 2, &rt, private);
}
while (atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0);
return 0;
}
-int
-lll_unlock_wake_cb(int* futex)
-{
- int val = atomic_exchange_24_rel(futex, 0);
-
- if( __builtin_expect( val > 1, 0 ) ) {
- lll_futex_wake( futex, 1 );
- }
-
- return 0;
-}
-
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
@@ -122,7 +123,7 @@ __lll_timedwait_tid (int *tidp, const struct timespec *abstime)
/* Wait until thread terminates. The kernel so far does not use
the private futex operations for this. */
- if (lll_futex_timed_wait (tidp, tid, &rt) == -ETIMEDOUT)
+ if (lll_futex_timed_wait (tidp, tid, &rt, LLL_SHARED) == -ETIMEDOUT)
return ETIMEDOUT;
}
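
The wait loops above follow the usual three-state futex lock protocol (0 = unlocked, 1 = locked with no waiters, 2 = locked with waiters). A minimal generic sketch of that protocol, using GCC builtins and the raw futex syscall instead of uClibc's atomic_compare_and_exchange_val_24_acq and lll_futex_* macros:

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void
    futex_lock (int *futex)
    {
      /* Fast path: 0 -> 1 when uncontended.  */
      if (__sync_val_compare_and_swap (futex, 0, 1) == 0)
        return;
      do
        {
          /* Mark the lock as contended (1 -> 2) and sleep while it stays 2.  */
          int oldval = __sync_val_compare_and_swap (futex, 1, 2);
          if (oldval != 0)
            syscall (SYS_futex, futex, FUTEX_WAIT, 2, NULL, NULL, 0);
        }
      /* On wake-up, try to take the lock in the contended state (0 -> 2).  */
      while (__sync_val_compare_and_swap (futex, 0, 2) != 0);
    }

    static void
    futex_unlock (int *futex)
    {
      /* Release the lock; a previous value of 2 means somebody is asleep.  */
      if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) > 1)
        syscall (SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    }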
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index 4db6fb0b4..a43f6b668 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -24,7 +25,7 @@
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
-#include <sysdep.h>
+#include <bits/kernel-features.h>
#define FUTEX_WAIT 0
@@ -33,62 +34,107 @@
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
+ & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
+# endif
+#endif
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define lll_futex_wait(futexp, val) \
- lll_futex_timed_wait (futexp, val, NULL)
+#define lll_futex_wait(futexp, val, private) \
+ lll_futex_timed_wait (futexp, val, NULL, private)
-#define lll_futex_timed_wait(futexp, val, timespec) \
+#define lll_futex_timed_wait(futexp, val, timespec, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
- __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
- FUTEX_WAIT, (val), (timespec)); \
+ \
+ __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
+ __lll_private_flag (FUTEX_WAIT, private), \
+ (val), (timespec)); \
__ret; \
})
-#define lll_futex_wake(futexp, nr) \
+#define lll_futex_wake(futexp, nr, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
+ \
__ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
- FUTEX_WAKE, (nr), 0); \
+ __lll_private_flag (FUTEX_WAKE, private), \
+ (nr), 0); \
__ret; \
})
/* Returns non-zero if error happened, zero if success. */
-#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val) \
+#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
+ \
__ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
- FUTEX_CMP_REQUEUE, (nr_wake), (nr_move), (mutex), (val)); \
+ __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
+ (nr_wake), (nr_move), (mutex), (val)); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
})
-#define lll_robust_dead(futexv) \
+#define lll_robust_dead(futexv, private) \
do \
{ \
int *__futexp = &(futexv); \
atomic_or (__futexp, FUTEX_OWNER_DIED); \
- lll_futex_wake (__futexp, 1); \
+ lll_futex_wake (__futexp, 1, private); \
} \
while (0)
/* Returns non-zero if error happened, zero if success. */
#ifdef __sparc32_atomic_do_lock
/* Avoid FUTEX_WAKE_OP if supporting pre-v9 CPUs. */
-# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) 1
+# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) 1
#else
-# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2) \
+# define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
({ \
INTERNAL_SYSCALL_DECL (__err); \
long int __ret; \
\
__ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
- FUTEX_WAKE_OP, \
+ __lll_private_flag (FUTEX_WAKE_OP, private), \
(nr_wake), (nr_wake2), (futexp2), \
FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
@@ -101,7 +147,7 @@ __lll_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 1, 0) != 0;
}
-#define lll_mutex_trylock(futex) __lll_trylock (&(futex))
+#define lll_trylock(futex) __lll_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
@@ -109,7 +155,7 @@ __lll_cond_trylock (int *futex)
{
return atomic_compare_and_exchange_val_24_acq (futex, 2, 0) != 0;
}
-#define lll_mutex_cond_trylock(futex) __lll_cond_trylock (&(futex))
+#define lll_cond_trylock(futex) __lll_cond_trylock (&(futex))
static inline int
__attribute__ ((always_inline))
@@ -121,116 +167,108 @@ __lll_robust_trylock (int *futex, int id)
__lll_robust_trylock (&(futex), id)
-extern void __lll_lock_wait (int *futex) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex) attribute_hidden;
+extern void __lll_lock_wait_private (int *futex) attribute_hidden;
+extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
+extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void
__attribute__ ((always_inline))
-__lll_lock (int *futex)
+__lll_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
if (__builtin_expect (val != 0, 0))
{
- __lll_lock_wait (futex);
+ if (__builtin_constant_p (private) && private == LLL_PRIVATE)
+ __lll_lock_wait_private (futex);
+ else
+ __lll_lock_wait (futex, private);
}
}
-#define lll_mutex_lock(futex) __lll_lock (&(futex))
+#define lll_lock(futex, private) __lll_lock (&(futex), private)
static inline int
__attribute__ ((always_inline))
-__lll_robust_lock (int *futex, int id)
+__lll_robust_lock (int *futex, int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex);
+ result = __lll_robust_lock_wait (futex, private);
return result;
}
-#define lll_robust_lock(futex, id) \
- __lll_robust_lock (&(futex), id)
+#define lll_robust_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), id, private)
static inline void
__attribute__ ((always_inline))
-__lll_cond_lock (int *futex)
+__lll_cond_lock (int *futex, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 2, 0);
if (__builtin_expect (val != 0, 0))
- __lll_lock_wait (futex);
+ __lll_lock_wait (futex, private);
}
-#define lll_mutex_cond_lock(futex) __lll_cond_lock (&(futex))
+#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define lll_robust_cond_lock(futex, id) \
- __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS)
+#define lll_robust_cond_lock(futex, id, private) \
+ __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-extern int __lll_timedlock_wait (int *futex, const struct timespec *) attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *) attribute_hidden;
+extern int __lll_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
+extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
+ int private) attribute_hidden;
static inline int
__attribute__ ((always_inline))
-__lll_timedlock (int *futex, const struct timespec *abstime)
+__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
int val = atomic_compare_and_exchange_val_24_acq (futex, 1, 0);
int result = 0;
if (__builtin_expect (val != 0, 0))
- result = __lll_timedlock_wait (futex, abstime);
+ result = __lll_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_mutex_timedlock(futex, abstime) \
- __lll_timedlock (&(futex), abstime)
+#define lll_timedlock(futex, abstime, private) \
+ __lll_timedlock (&(futex), abstime, private)
static inline int
__attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
- int id)
+ int id, int private)
{
int result = 0;
if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_timedlock_wait (futex, abstime);
+ result = __lll_robust_timedlock_wait (futex, abstime, private);
return result;
}
-#define lll_robust_timedlock(futex, abstime, id) \
- __lll_robust_timedlock (&(futex), abstime, id)
+#define lll_robust_timedlock(futex, abstime, id, private) \
+ __lll_robust_timedlock (&(futex), abstime, id, private)
-#define lll_mutex_unlock(lock) \
+#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_24_rel (__futex, 0); \
if (__builtin_expect (__val > 1, 0)) \
- lll_futex_wake (__futex, 1); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_robust_unlock(lock) \
+#define lll_robust_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
int __val = atomic_exchange_rel (__futex, 0); \
if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
- lll_futex_wake (__futex, 1); \
+ lll_futex_wake (__futex, 1, private); \
}))
-#define lll_mutex_islocked(futex) \
+#define lll_islocked(futex) \
(futex != 0)
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
/* Initializers for lock. */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-#define lll_trylock(lock) lll_mutex_trylock (lock)
-#define lll_lock(lock) lll_mutex_lock (lock)
-#define lll_unlock(lock) lll_mutex_unlock (lock)
-#define lll_islocked(lock) lll_mutex_islocked (lock)
-
-
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
wakeup when the clone terminates. The memory location contains the
thread ID while the clone is running and is reset to zero
@@ -240,7 +278,7 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
{ \
__typeof (tid) __tid; \
while ((__tid = (tid)) != 0) \
- lll_futex_wait (&(tid), __tid); \
+ lll_futex_wait (&(tid), __tid, LLL_SHARED); \
} \
while (0)
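
The "backwards" LLL_PRIVATE/LLL_SHARED encoding commented on above can be checked in isolation. Under the __ASSUME_PRIVATE_FUTEX branch shown here the macro is ((fl) | FUTEX_PRIVATE_FLAG) ^ (private), so private callers get the private futex opcode and shared callers get the plain one:

    #include <assert.h>

    #define FUTEX_WAIT         0
    #define FUTEX_PRIVATE_FLAG 128
    #define LLL_PRIVATE        0
    #define LLL_SHARED         FUTEX_PRIVATE_FLAG

    #define lll_private_flag(fl, private) \
      (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))

    int
    main (void)
    {
      /* Process-private callers end up with the private-futex bit set...  */
      assert (lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
              == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
      /* ...while process-shared callers get the plain operation.  */
      assert (lll_private_flag (FUTEX_WAIT, LLL_SHARED) == FUTEX_WAIT);
      return 0;
    }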
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h
new file mode 100644
index 000000000..acf1a617e
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/not-cancel.h
@@ -0,0 +1 @@
+#include "../i386/not-cancel.h"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
new file mode 100644
index 000000000..ca96379c9
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_destroy.c
@@ -0,0 +1,45 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_destroy (
+ pthread_barrier_t *barrier)
+{
+ union sparc_pthread_barrier *ibarrier;
+ int result = EBUSY;
+
+ ibarrier = (union sparc_pthread_barrier *) barrier;
+
+ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+ lll_lock (ibarrier->b.lock, private);
+
+ if (__builtin_expect (ibarrier->b.left == ibarrier->b.init_count, 1))
+ /* The barrier is not used anymore. */
+ result = 0;
+ else
+ /* Still used, return with an error. */
+ lll_unlock (ibarrier->b.lock, private);
+
+ return result;
+}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
new file mode 100644
index 000000000..6ca472778
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_barrier_init.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2002, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include "pthreadP.h"
+#include <lowlevellock.h>
+
+int
+pthread_barrier_init (
+ pthread_barrier_t *barrier,
+	const pthread_barrierattr_t *attr,
+ unsigned int count)
+{
+ union sparc_pthread_barrier *ibarrier;
+
+ if (__builtin_expect (count == 0, 0))
+ return EINVAL;
+
+ struct pthread_barrierattr *iattr = (struct pthread_barrierattr *) attr;
+ if (iattr != NULL)
+ {
+ if (iattr->pshared != PTHREAD_PROCESS_PRIVATE
+ && __builtin_expect (iattr->pshared != PTHREAD_PROCESS_SHARED, 0))
+ /* Invalid attribute. */
+ return EINVAL;
+ }
+
+ ibarrier = (union sparc_pthread_barrier *) barrier;
+
+ /* Initialize the individual fields. */
+ ibarrier->b.lock = LLL_LOCK_INITIALIZER;
+ ibarrier->b.left = count;
+ ibarrier->b.init_count = count;
+ ibarrier->b.curr_event = 0;
+ ibarrier->s.left_lock = 0;
+ ibarrier->s.pshared = (iattr && iattr->pshared == PTHREAD_PROCESS_SHARED);
+
+ return 0;
+}
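
The pshared handling above only matters for barriers placed in shared memory. A standard POSIX usage example (not part of the patch) that exercises that path:

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int
    main (void)
    {
      /* Put the barrier in a MAP_SHARED mapping so a forked child can use it.  */
      pthread_barrier_t *barrier = mmap (NULL, sizeof (*barrier),
                                         PROT_READ | PROT_WRITE,
                                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
      if (barrier == MAP_FAILED)
        {
          perror ("mmap");
          return 1;
        }

      pthread_barrierattr_t attr;
      pthread_barrierattr_init (&attr);
      pthread_barrierattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);

      if (pthread_barrier_init (barrier, &attr, 2) != 0)   /* two participants */
        {
          perror ("pthread_barrier_init");
          return 1;
        }
      pthread_barrierattr_destroy (&attr);

      /* fork() here; parent and child both call pthread_barrier_wait().  */
      return 0;
    }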
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c
index 3b07cc127..22e2dd3c0 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/pthread_once.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
@@ -30,7 +30,7 @@ clear_once_control (void *arg)
pthread_once_t *once_control = (pthread_once_t *) arg;
*once_control = 0;
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
@@ -65,7 +65,7 @@ __pthread_once (once_control, init_routine)
if (((oldval ^ newval) & -4) == 0)
{
/* Same generation, some other thread was faster. Wait. */
- lll_futex_wait (once_control, newval);
+ lll_futex_wait (once_control, newval, LLL_PRIVATE);
continue;
}
}
@@ -84,7 +84,7 @@ __pthread_once (once_control, init_routine)
atomic_increment (once_control);
/* Wake up all other threads. */
- lll_futex_wake (once_control, INT_MAX);
+ lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
break;
}
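
The hunks above only switch the futex calls to LLL_PRIVATE; for context, here is a greatly simplified sketch of the protocol they sit in. It uses three plain states and ignores the fork-generation counter the real code keeps in the upper bits (that counter is what the "(oldval ^ newval) & -4" comparison checks), and it omits the clear_once_control cancellation handler.

    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Simplified: 0 = not yet run, 1 = initializer running, 2 = done.  */
    static void
    once_sketch (int *once_control, void (*init_routine) (void))
    {
      while (1)
        {
          int oldval = __sync_val_compare_and_swap (once_control, 0, 1);
          if (oldval == 2)
            return;                         /* already initialized */
          if (oldval == 1)
            {
              /* Some other thread was faster: wait for it to finish.  */
              syscall (SYS_futex, once_control, FUTEX_WAIT, 1, NULL, NULL, 0);
              continue;
            }
          /* We won the race: run the initializer, publish, wake everybody.  */
          init_routine ();
          __atomic_store_n (once_control, 2, __ATOMIC_RELEASE);
          syscall (SYS_futex, once_control, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
          return;
        }
    }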
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
new file mode 100644
index 000000000..f694b5e93
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sem_init.c
@@ -0,0 +1,57 @@
+/* Copyright (C) 2002, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <string.h>
+#include <semaphore.h>
+#include <lowlevellock.h>
+#include "semaphoreP.h"
+#include <bits/kernel-features.h>
+
+
+int
+__new_sem_init (sem, pshared, value)
+ sem_t *sem;
+ int pshared;
+ unsigned int value;
+{
+ /* Parameter sanity check. */
+ if (__builtin_expect (value > SEM_VALUE_MAX, 0))
+ {
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ /* Map to the internal type. */
+ struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+
+ /* Use the values the user provided. */
+ memset (isem, '\0', sizeof (*isem));
+ isem->value = value;
+#ifdef __ASSUME_PRIVATE_FUTEX
+ isem->private = pshared ? 0 : FUTEX_PRIVATE_FLAG;
+#else
+ isem->private = pshared ? 0 : THREAD_GETMEM (THREAD_SELF,
+ header.private_futex);
+#endif
+
+ return 0;
+}
+weak_alias(__new_sem_init, sem_init)
+
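
Under __ASSUME_PRIVATE_FUTEX the private field initialized above does not hold an LLL_* value: it stores FUTEX_PRIVATE_FLAG for a process-private semaphore and 0 for a shared one, so the wait/post code can convert it to the LLL_* convention with a single XOR (the isem->private ^ FUTEX_PRIVATE_FLAG expressions in the sem_post/sem_wait files below). A tiny check of that mapping:

    #include <assert.h>

    #define FUTEX_PRIVATE_FLAG 128
    #define LLL_PRIVATE        0
    #define LLL_SHARED         FUTEX_PRIVATE_FLAG

    int
    main (void)
    {
      /* Process-private semaphore (pshared == 0): field holds the flag.  */
      int private_field = FUTEX_PRIVATE_FLAG;
      assert ((private_field ^ FUTEX_PRIVATE_FLAG) == LLL_PRIVATE);

      /* Process-shared semaphore (pshared != 0): field holds 0.  */
      private_field = 0;
      assert ((private_field ^ FUTEX_PRIVATE_FLAG) == LLL_SHARED);
      return 0;
    }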
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
new file mode 100644
index 000000000..302d1b371
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/pthread_barrier_wait.c
@@ -0,0 +1,94 @@
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <pthreadP.h>
+
+/* Wait on barrier. */
+int
+pthread_barrier_wait (
+ pthread_barrier_t *barrier)
+{
+ union sparc_pthread_barrier *ibarrier
+ = (union sparc_pthread_barrier *) barrier;
+ int result = 0;
+ int private = ibarrier->s.pshared ? LLL_SHARED : LLL_PRIVATE;
+
+ /* Make sure we are alone. */
+ lll_lock (ibarrier->b.lock, private);
+
+ /* One more arrival. */
+ --ibarrier->b.left;
+
+ /* Are these all? */
+ if (ibarrier->b.left == 0)
+ {
+ /* Yes. Increment the event counter to avoid invalid wake-ups and
+ tell the current waiters that it is their turn. */
+ ++ibarrier->b.curr_event;
+
+ /* Wake up everybody. */
+ lll_futex_wake (&ibarrier->b.curr_event, INT_MAX, private);
+
+ /* This is the thread which finished the serialization. */
+ result = PTHREAD_BARRIER_SERIAL_THREAD;
+ }
+ else
+ {
+ /* The number of the event we are waiting for. The barrier's event
+ number must be bumped before we continue. */
+ unsigned int event = ibarrier->b.curr_event;
+
+ /* Before suspending, make the barrier available to others. */
+ lll_unlock (ibarrier->b.lock, private);
+
+ /* Wait for the event counter of the barrier to change. */
+ do
+ lll_futex_wait (&ibarrier->b.curr_event, event, private);
+ while (event == ibarrier->b.curr_event);
+ }
+
+ /* Make sure the init_count is stored locally or in a register. */
+ unsigned int init_count = ibarrier->b.init_count;
+
+ /* If this was the last woken thread, unlock. */
+ if (__atomic_is_v9 || ibarrier->s.pshared == 0)
+ {
+ if (atomic_increment_val (&ibarrier->b.left) == init_count)
+ /* We are done. */
+ lll_unlock (ibarrier->b.lock, private);
+ }
+ else
+ {
+ unsigned int left;
+ /* Slightly more complicated. On pre-v9 CPUs, atomic_increment_val
+ is only atomic for threads within the same process, not for
+ multiple processes. */
+ __sparc32_atomic_do_lock24 (&ibarrier->s.left_lock);
+ left = ++ibarrier->b.left;
+ __sparc32_atomic_do_unlock24 (&ibarrier->s.left_lock);
+ if (left == init_count)
+ /* We are done. */
+ lll_unlock (ibarrier->b.lock, private);
+ }
+
+ return result;
+}
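
A standard usage example (not from the patch) showing what the PTHREAD_BARRIER_SERIAL_THREAD return value above is for: exactly one waiter per round receives it and can do the per-round serial work while the others see 0.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_barrier_t barrier;

    static void *
    worker (void *arg)
    {
      (void) arg;
      int ret = pthread_barrier_wait (&barrier);
      if (ret == PTHREAD_BARRIER_SERIAL_THREAD)
        printf ("elected to do the per-round work\n");
      else if (ret != 0)
        fprintf (stderr, "barrier wait failed\n");
      return NULL;
    }

    int
    main (void)
    {
      pthread_t th[4];
      pthread_barrier_init (&barrier, NULL, 4);
      for (int i = 0; i < 4; ++i)
        pthread_create (&th[i], NULL, worker, NULL);
      for (int i = 0; i < 4; ++i)
        pthread_join (th[i], NULL);
      pthread_barrier_destroy (&barrier);
      return 0;
    }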
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c
new file mode 100644
index 000000000..940728eeb
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_post.c
@@ -0,0 +1,55 @@
+/* sem_post -- post to a POSIX semaphore. SPARC version.
+ Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+int
+__new_sem_post (sem_t *sem)
+{
+ struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+ int nr;
+
+ if (__atomic_is_v9)
+ nr = atomic_increment_val (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ nr = ++(isem->value);
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+ atomic_full_barrier ();
+ if (isem->nwaiters > 0)
+ {
+ int err = lll_futex_wake (&isem->value, 1,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+ if (__builtin_expect (err, 0) < 0)
+ {
+ __set_errno (-err);
+ return -1;
+ }
+ }
+ return 0;
+}
+weak_alias(__new_sem_post, sem_post)
+
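
The non-v9 branch above illustrates a general fallback: without a usable compare-and-swap, a tiny spinlock serialises the plain increment. A generic sketch of that pattern with GCC builtins and a separate lock byte; the real __sparc32_atomic_do_lock24 instead uses ldstub on the top byte of the same 24-bit-value word.

    static unsigned char value_lock;   /* 0 = free, non-zero = held */
    static unsigned int sem_value;

    static void
    byte_lock (unsigned char *lock)
    {
      /* Atomic test-and-set; spin until we took the lock ourselves.  */
      while (__sync_lock_test_and_set (lock, 1))
        ;
    }

    static void
    byte_unlock (unsigned char *lock)
    {
      __sync_lock_release (lock);      /* store 0 with release semantics */
    }

    static unsigned int
    locked_increment (void)
    {
      byte_lock (&value_lock);
      unsigned int nr = ++sem_value;   /* safe: the lock serialises updates */
      byte_unlock (&value_lock);
      return nr;
    }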
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
new file mode 100644
index 000000000..aa5bd80ed
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_timedwait.c
@@ -0,0 +1,148 @@
+/* sem_timedwait -- wait on a semaphore. SPARC version.
+ Copyright (C) 2003, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+#include <pthreadP.h>
+
+
+extern void __sem_wait_cleanup (void *arg) attribute_hidden;
+
+
+int
+sem_timedwait (sem_t *sem, const struct timespec *abstime)
+{
+ struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+ int err;
+ int val;
+
+ if (__atomic_is_v9)
+ val = atomic_decrement_if_positive (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ val = isem->value;
+ if (val > 0)
+ isem->value = val - 1;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ if (val > 0)
+ return 0;
+
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
+ {
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ if (__atomic_is_v9)
+ atomic_increment (&isem->nwaiters);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ isem->nwaiters++;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+ while (1)
+ {
+ struct timeval tv;
+ struct timespec rt;
+ int sec, nsec;
+
+ /* Get the current time. */
+ __gettimeofday (&tv, NULL);
+
+ /* Compute relative timeout. */
+ sec = abstime->tv_sec - tv.tv_sec;
+ nsec = abstime->tv_nsec - tv.tv_usec * 1000;
+ if (nsec < 0)
+ {
+ nsec += 1000000000;
+ --sec;
+ }
+
+ /* Already timed out? */
+ err = -ETIMEDOUT;
+ if (sec < 0)
+ {
+ __set_errno (ETIMEDOUT);
+ err = -1;
+ break;
+ }
+
+ /* Do wait. */
+ rt.tv_sec = sec;
+ rt.tv_nsec = nsec;
+
+ /* Enable asynchronous cancellation. Required by the standard. */
+ int oldtype = __pthread_enable_asynccancel ();
+
+ err = lll_futex_timed_wait (&isem->value, 0, &rt,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (oldtype);
+
+ if (err != 0 && err != -EWOULDBLOCK)
+ {
+ __set_errno (-err);
+ err = -1;
+ break;
+ }
+
+ if (__atomic_is_v9)
+ val = atomic_decrement_if_positive (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ val = isem->value;
+ if (val > 0)
+ isem->value = val - 1;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ if (val > 0)
+ {
+ err = 0;
+ break;
+ }
+ }
+
+ pthread_cleanup_pop (0);
+
+ if (__atomic_is_v9)
+ atomic_decrement (&isem->nwaiters);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ isem->nwaiters--;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ return err;
+}
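
The absolute-to-relative conversion done inside the loop above, pulled out as a stand-alone helper for clarity (illustrative only; the real code keeps it inline so the deadline is re-evaluated on every wake-up):

    #include <errno.h>
    #include <sys/time.h>
    #include <time.h>

    static int
    abstime_to_reltime (const struct timespec *abstime, struct timespec *rt)
    {
      struct timeval tv;
      gettimeofday (&tv, NULL);

      long sec = abstime->tv_sec - tv.tv_sec;
      long nsec = abstime->tv_nsec - tv.tv_usec * 1000;
      if (nsec < 0)
        {
          nsec += 1000000000;
          --sec;
        }
      if (sec < 0)
        {
          errno = ETIMEDOUT;
          return -1;             /* deadline already passed */
        }
      rt->tv_sec = sec;
      rt->tv_nsec = nsec;
      return 0;
    }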
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c
new file mode 100644
index 000000000..d4e893805
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_trywait.c
@@ -0,0 +1,54 @@
+/* sem_trywait -- wait on a semaphore. SPARC version.
+ Copyright (C) 2003, 2006, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+
+int
+__new_sem_trywait (sem_t *sem)
+{
+ struct sparc_old_sem *isem = (struct sparc_old_sem *) sem;
+ int val;
+
+ if (isem->value > 0)
+ {
+ if (__atomic_is_v9)
+ val = atomic_decrement_if_positive (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ val = isem->value;
+ if (val > 0)
+ isem->value = val - 1;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+ if (val > 0)
+ return 0;
+ }
+
+ __set_errno (EAGAIN);
+ return -1;
+}
+weak_alias(__new_sem_trywait, sem_trywait)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c
new file mode 100644
index 000000000..cfe04a802
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sem_wait.c
@@ -0,0 +1,127 @@
+/* sem_wait -- wait on a semaphore. Generic futex-using version.
+ Copyright (C) 2003, 2007 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <errno.h>
+#include <sysdep.h>
+#include <lowlevellock.h>
+#include <internaltypes.h>
+#include <semaphore.h>
+
+#include <pthreadP.h>
+
+
+void
+attribute_hidden
+__sem_wait_cleanup (void *arg)
+{
+ struct sparc_new_sem *isem = (struct sparc_new_sem *) arg;
+
+ if (__atomic_is_v9)
+ atomic_decrement (&isem->nwaiters);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ isem->nwaiters--;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+}
+
+
+int
+__new_sem_wait (sem_t *sem)
+{
+ struct sparc_new_sem *isem = (struct sparc_new_sem *) sem;
+ int err;
+ int val;
+
+ if (__atomic_is_v9)
+ val = atomic_decrement_if_positive (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ val = isem->value;
+ if (val > 0)
+ isem->value = val - 1;
+ else
+ isem->nwaiters++;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ if (val > 0)
+ return 0;
+
+ if (__atomic_is_v9)
+ atomic_increment (&isem->nwaiters);
+ else
+ /* Already done above while still holding isem->lock. */;
+
+ pthread_cleanup_push (__sem_wait_cleanup, isem);
+
+ while (1)
+ {
+ /* Enable asynchronous cancellation. Required by the standard. */
+ int oldtype = __pthread_enable_asynccancel ();
+
+ err = lll_futex_wait (&isem->value, 0,
+ isem->private ^ FUTEX_PRIVATE_FLAG);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (oldtype);
+
+ if (err != 0 && err != -EWOULDBLOCK)
+ {
+ __set_errno (-err);
+ err = -1;
+ break;
+ }
+
+ if (__atomic_is_v9)
+ val = atomic_decrement_if_positive (&isem->value);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ val = isem->value;
+ if (val > 0)
+ isem->value = val - 1;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ if (val > 0)
+ {
+ err = 0;
+ break;
+ }
+ }
+
+ pthread_cleanup_pop (0);
+
+ if (__atomic_is_v9)
+ atomic_decrement (&isem->nwaiters);
+ else
+ {
+ __sparc32_atomic_do_lock24 (&isem->lock);
+ isem->nwaiters--;
+ __sparc32_atomic_do_unlock24 (&isem->lock);
+ }
+
+ return err;
+}
+weak_alias(__new_sem_wait, sem_wait)
+
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h
index ad650d040..1f55bd623 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/sysdep-cancel.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Jakub Jelinek <jakub@redhat.com>, 2002.
@@ -26,47 +26,48 @@
#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
# undef PSEUDO
-# define PSEUDO(name, syscall_name, args) \
- .text; \
- .globl __syscall_error; \
-ENTRY(name) \
- ld [%g7 + MULTIPLE_THREADS_OFFSET], %g1;\
- cmp %g1, 0; \
- bne 1f; \
-.type __##syscall_name##_nocancel,@function; \
-.globl __##syscall_name##_nocancel; \
-__##syscall_name##_nocancel: \
- mov SYS_ify(syscall_name), %g1; \
- ta 0x10; \
- bcc 8f; \
- mov %o7, %g1; \
- call __syscall_error; \
- mov %g1, %o7; \
-8: jmpl %o7 + 8, %g0; \
- nop; \
-.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;\
-1: save %sp, -96, %sp; \
- cfi_def_cfa_register(%fp); \
- cfi_window_save; \
- cfi_register(%o7, %i7); \
- CENABLE; \
- nop; \
- mov %o0, %l0; \
- COPY_ARGS_##args \
- mov SYS_ify(syscall_name), %g1; \
- ta 0x10; \
- bcc 1f; \
- mov %o0, %l1; \
- CDISABLE; \
- mov %l0, %o0; \
- call __syscall_error; \
- mov %l1, %o0; \
- b 2f; \
- mov -1, %l1; \
-1: CDISABLE; \
- mov %l0, %o0; \
-2: jmpl %i7 + 8, %g0; \
- restore %g0, %l1, %o0;
+# define PSEUDO(name, syscall_name, args) \
+ .text; \
+ .globl __syscall_error; \
+ENTRY(name) \
+ ld [%g7 + MULTIPLE_THREADS_OFFSET], %g1;\
+ cmp %g1, 0; \
+ bne 1f; \
+.type __##syscall_name##_nocancel,@function; \
+.globl __##syscall_name##_nocancel; \
+__##syscall_name##_nocancel: \
+ mov SYS_ify(syscall_name), %g1; \
+ ta 0x10; \
+ bcc 8f; \
+ mov %o7, %g1; \
+ call __syscall_error; \
+ mov %g1, %o7; \
+8: jmpl %o7 + 8, %g0; \
+ nop; \
+.size __##syscall_name##_nocancel,.-__##syscall_name##_nocancel;\
+1: save %sp, -96, %sp; \
+ cfi_def_cfa_register(%fp); \
+ cfi_window_save; \
+ cfi_register(%o7, %i7); \
+ CENABLE; \
+ nop; \
+ mov %o0, %l0; \
+ COPY_ARGS_##args \
+ mov SYS_ify(syscall_name), %g1; \
+ ta 0x10; \
+ bcc 1f; \
+ mov %o0, %l1; \
+ CDISABLE; \
+ mov %l0, %o0; \
+ call __syscall_error; \
+ mov %l1, %o0; \
+ b 2f; \
+ mov -1, %l1; \
+1: CDISABLE; \
+ mov %l0, %o0; \
+2: jmpl %i7 + 8, %g0; \
+ restore %g0, %l1, %o0;
+
# ifdef IS_IN_libpthread
# define CENABLE call __pthread_enable_asynccancel
@@ -103,3 +104,9 @@ __##syscall_name##_nocancel: \
# define NO_CANCELLATION 1
#endif
+
+#ifndef __ASSEMBLER__
+# define RTLD_SINGLE_THREAD_P \
+ __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
+ header.multiple_threads) == 0, 1)
+#endif
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S
index 1a3827789..a8e4dd5a4 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc32/vfork.S
@@ -45,5 +45,5 @@ ENTRY(__vfork)
nop
END(__vfork)
-hidden_def (vfork)
+libc_hidden_def (__vfork)
weak_alias (__vfork, vfork)
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions
deleted file mode 100644
index 3b111ddb5..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sparc/sparc64/Versions
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
- GLIBC_2.3.3 {
- # Changed timer_t.
- timer_create; timer_delete; timer_getoverrun; timer_gettime;
- timer_settime;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym b/libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym
new file mode 100644
index 000000000..0e2a15f2b
--- /dev/null
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/structsem.sym
@@ -0,0 +1,12 @@
+#include <limits.h>
+#include <stddef.h>
+#include <sched.h>
+#include <bits/pthreadtypes.h>
+#include "internaltypes.h"
+
+--
+
+VALUE offsetof (struct new_sem, value)
+PRIVATE offsetof (struct new_sem, private)
+NWAITERS offsetof (struct new_sem, nwaiters)
+SEM_VALUE_MAX SEM_VALUE_MAX
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c
index 9f02fe3b2..a7da2a09a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_create.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003,2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003,2004, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -167,6 +167,7 @@ timer_create (
/* Copy the thread parameters the user provided. */
newp->sival = evp->sigev_value;
newp->thrfunc = evp->sigev_notify_function;
+ newp->sigev_notify = SIGEV_THREAD;
/* We cannot simply copy the thread attributes since the
implementation might keep internal information for
@@ -193,12 +194,11 @@ timer_create (
PTHREAD_CREATE_DETACHED);
/* Create the event structure for the kernel timer. */
- struct sigevent sev;
- sev.sigev_value.sival_ptr = newp;
- sev.sigev_signo = SIGTIMER;
- sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
- /* This is the thread ID of the helper thread. */
- sev._sigev_un._pad[0] = __helper_tid;
+ struct sigevent sev =
+ { .sigev_value.sival_ptr = newp,
+ .sigev_signo = SIGTIMER,
+ .sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
+ ._sigev_un = { ._pad = { [0] = __helper_tid } } };
/* Create the timer. */
INTERNAL_SYSCALL_DECL (err);
@@ -207,6 +207,13 @@ timer_create (
syscall_clockid, &sev, &newp->ktimerid);
if (! INTERNAL_SYSCALL_ERROR_P (res, err))
{
+ /* Add to the queue of active timers with thread
+ delivery. */
+ pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+ newp->next = __active_timer_sigev_thread;
+ __active_timer_sigev_thread = newp;
+ pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
+
*timerid = (timer_t) newp;
return 0;
}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c
index 9b92446d5..5ad40b99a 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_delete.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -39,7 +39,8 @@ static int compat_timer_delete (timer_t timerid);
int
-timer_delete (timer_t timerid)
+timer_delete (
+ timer_t timerid)
{
# undef timer_delete
# ifndef __ASSUME_POSIX_TIMERS
@@ -53,6 +54,27 @@ timer_delete (timer_t timerid)
if (res == 0)
{
+ if (kt->sigev_notify == SIGEV_THREAD)
+ {
+ /* Remove the timer from the list. */
+ pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+ if (__active_timer_sigev_thread == kt)
+ __active_timer_sigev_thread = kt->next;
+ else
+ {
+ struct timer *prevp = __active_timer_sigev_thread;
+ while (prevp->next != NULL)
+ if (prevp->next == kt)
+ {
+ prevp->next = kt->next;
+ break;
+ }
+ else
+ prevp = prevp->next;
+ }
+ pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
+ }
+
# ifndef __ASSUME_POSIX_TIMERS
/* We know the syscall support is available. */
__no_posix_timers = 1;
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c
index 7afc5ec6f..62a558aef 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_getoverr.c
@@ -38,7 +38,8 @@ static int compat_timer_getoverrun (timer_t timerid);
int
-timer_getoverrun (timer_t timerid)
+timer_getoverrun (
+ timer_t timerid)
{
# undef timer_getoverrun
# ifndef __ASSUME_POSIX_TIMERS
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c
index 924c524ba..2681961bf 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/timer_routines.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
@@ -27,6 +27,19 @@
#include "kernel-posix-timers.h"
+/* List of active SIGEV_THREAD timers. */
+struct timer *__active_timer_sigev_thread;
+/* Lock for the __active_timer_sigev_thread. */
+pthread_mutex_t __active_timer_sigev_thread_lock = PTHREAD_MUTEX_INITIALIZER;
+
+
+struct thread_start_data
+{
+ void (*thrfunc) (sigval_t);
+ sigval_t sival;
+};
+
+
#ifdef __NR_timer_create
/* Helper thread to call the user-provided function. */
static void *
@@ -40,10 +53,16 @@ timer_sigev_thread (void *arg)
INTERNAL_SYSCALL_DECL (err);
INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, NULL, _NSIG / 8);
- struct timer *tk = (struct timer *) arg;
+ struct thread_start_data *td = (struct thread_start_data *) arg;
+
+ void (*thrfunc) (sigval_t) = td->thrfunc;
+ sigval_t sival = td->sival;
+
+ /* The TD object was allocated in timer_helper_thread. */
+ free (td);
/* Call the user-provided function. */
- tk->thrfunc (tk->sival);
+ thrfunc (sival);
return NULL;
}
@@ -83,9 +102,35 @@ timer_helper_thread (void *arg)
{
struct timer *tk = (struct timer *) si.si_ptr;
- /* That the signal we are waiting for. */
- pthread_t th;
- (void) pthread_create (&th, &tk->attr, timer_sigev_thread, tk);
+ /* Check the timer is still used and will not go away
+ while we are reading the values here. */
+ pthread_mutex_lock (&__active_timer_sigev_thread_lock);
+
+ struct timer *runp = __active_timer_sigev_thread;
+ while (runp != NULL)
+ if (runp == tk)
+ break;
+ else
+ runp = runp->next;
+
+ if (runp != NULL)
+ {
+ struct thread_start_data *td = malloc (sizeof (*td));
+
+ /* There is not much we can do if the allocation fails. */
+ if (td != NULL)
+ {
+ /* This is the signal we are waiting for. */
+ td->thrfunc = tk->thrfunc;
+ td->sival = tk->sival;
+
+ pthread_t th;
+ (void) pthread_create (&th, &tk->attr,
+ timer_sigev_thread, td);
+ }
+ }
+
+ pthread_mutex_unlock (&__active_timer_sigev_thread_lock);
}
else if (si.si_code == SI_TKILL)
/* The thread is canceled. */
@@ -125,7 +170,7 @@ __start_helper_thread (void)
/* Block all signals in the helper thread but SIGSETXID. To do this
thoroughly we temporarily have to block all signals here. The
helper can lose wakeups if SIGCANCEL is not blocked throughout,
- but sigfillset omits it SIGSETXID. So, we add it back
+     but sigfillset omits it, as it does SIGSETXID.  So, we add SIGCANCEL back
explicitly here. */
sigset_t ss;
sigset_t oss;
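
For context, this is the kind of caller all of the above serves (a plain POSIX SIGEV_THREAD timer, not part of the patch): timer_create puts the timer on __active_timer_sigev_thread, the kernel delivers SIGTIMER to the helper thread, and the helper looks the timer up in that list before spawning a detached thread that runs the notify function.

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    static void
    expired (union sigval sv)
    {
      printf ("timer %d fired\n", sv.sival_int);
    }

    int
    main (void)
    {
      struct sigevent sev;
      memset (&sev, 0, sizeof (sev));
      sev.sigev_notify = SIGEV_THREAD;
      sev.sigev_notify_function = expired;
      sev.sigev_value.sival_int = 42;

      timer_t timerid;
      if (timer_create (CLOCK_REALTIME, &sev, &timerid) != 0)
        {
          perror ("timer_create");
          return 1;
        }

      struct itimerspec its;
      memset (&its, 0, sizeof (its));
      its.it_value.tv_sec = 1;                 /* one-shot, fires after 1s */
      timer_settime (timerid, 0, &its, NULL);

      sleep (2);                               /* let the notify thread run */
      timer_delete (timerid);
      return 0;
    }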
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c b/libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
index 35955238b..94c78fce5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/unregister-atfork.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,12 +19,13 @@
#include <errno.h>
#include <stdlib.h>
-#include "fork.h"
+#include <fork.h>
#include <atomic.h>
void
-__unregister_atfork (void *dso_handle)
+__unregister_atfork (
+ void *dso_handle)
{
/* Check whether there is any entry in the list which we have to
remove. It is likely that this is not the case so don't bother
@@ -53,7 +54,7 @@ __unregister_atfork (void *dso_handle)
that there couldn't have been another thread deleting something.
The __unregister_atfork function is only called from the
dlclose() code which itself serializes the operations. */
- lll_lock (__fork_lock);
+ lll_lock (__fork_lock, LLL_PRIVATE);
/* We have to create a new list with all the entries we don't remove. */
struct deleted_handler
@@ -66,10 +67,21 @@ __unregister_atfork (void *dso_handle)
It's a single linked list so readers are. */
do
{
+ again:
if (runp->dso_handle == dso_handle)
{
if (lastp == NULL)
- __fork_handlers = runp->next;
+ {
+ /* We have to use an atomic operation here because
+ __linkin_atfork also uses one. */
+ if (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
+ runp->next, runp)
+ != 0)
+ {
+ runp = __fork_handlers;
+ goto again;
+ }
+ }
else
lastp->next = runp->next;
@@ -88,7 +100,7 @@ __unregister_atfork (void *dso_handle)
while (runp != NULL);
/* Release the lock. */
- lll_unlock (__fork_lock);
+ lll_unlock (__fork_lock, LLL_PRIVATE);
/* Walk the list of all entries which have to be deleted. */
while (deleted != NULL)
@@ -103,7 +115,7 @@ __unregister_atfork (void *dso_handle)
atomic_decrement (&deleted->handler->refcntr);
unsigned int val;
while ((val = deleted->handler->refcntr) != 0)
- lll_futex_wait (&deleted->handler->refcntr, val);
+ lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);
deleted = deleted->next;
}
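
The interesting part of the unregister-atfork change is the head case: because __linkin_atfork pushes new handlers onto __fork_handlers with an atomic operation, popping the head must also be a compare-and-exchange, retried from the top if another thread won the race. The following is a minimal sketch of that retry idiom in C11 atomics; the node type and function name are invented, and the real function additionally holds __fork_lock, keeps walking with lastp for non-head entries, and only frees a handler after its refcntr drains (the lll_futex_wait at the end of the hunk).

/* CAS-with-retry removal of a matching list head, mirroring the
   "goto again" loop added to __unregister_atfork.  Illustrative only.  */
#include <stdatomic.h>
#include <stddef.h>

struct node
{
  void *dso_handle;
  struct node *next;
};

/* List head; writers push onto it with atomic operations elsewhere.  */
static _Atomic (struct node *) list_head;

/* Unlink the first node whose dso_handle matches and return it, so the
   caller can dispose of it once no reader can still reference it.  */
static struct node *
unlink_head_if_match (void *dso_handle)
{
  struct node *runp = atomic_load (&list_head);
  while (runp != NULL && runp->dso_handle == dso_handle)
    {
      /* On failure the CAS reloads runp with the current head (someone
         pushed or popped concurrently) and we simply try again.  */
      if (atomic_compare_exchange_weak (&list_head, &runp, runp->next))
        return runp;
    }
  return NULL;          /* head does not match (or list is empty) */
}

int
main (void)
{
  static struct node n = { .dso_handle = (void *) 0x1, .next = NULL };
  atomic_store (&list_head, &n);
  return unlink_head_if_match ((void *) 0x1) == &n ? 0 : 1;
}
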
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
deleted file mode 100644
index 3b111ddb5..000000000
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/Versions
+++ /dev/null
@@ -1,7 +0,0 @@
-librt {
- GLIBC_2.3.3 {
- # Changed timer_t.
- timer_create; timer_delete; timer_getoverrun; timer_gettime;
- timer_settime;
- }
-}
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
index 57edbbbfb..e973bc5bf 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/bits/semaphore.h
@@ -33,9 +33,6 @@
/* Value returned if `sem_open' failed. */
#define SEM_FAILED ((sem_t *) 0)
-/* Maximum value the semaphore can have. */
-#define SEM_VALUE_MAX (2147483647)
-
typedef union
{
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
index 1e461ad41..b0d04c75b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2006, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -19,39 +19,74 @@
#include <sysdep.h>
#include <pthread-errnos.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
.text
-#ifndef LOCK
-# ifdef UP
-# define LOCK
+#ifdef __ASSUME_PRIVATE_FUTEX
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg
+#else
+# if FUTEX_WAIT == 0
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg
+# else
+# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
+# endif
+# define LOAD_PRIVATE_FUTEX_WAKE(reg) \
+ movl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
+# if FUTEX_WAIT == 0
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg
# else
-# define LOCK lock
+# define LOAD_FUTEX_WAIT(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT, reg
# endif
+# define LOAD_FUTEX_WAIT_ABS(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
+# define LOAD_FUTEX_WAKE(reg) \
+ xorl $FUTEX_PRIVATE_FLAG, reg ; \
+ andl %fs:PRIVATE_FUTEX, reg ; \
+ orl $FUTEX_WAKE, reg
#endif
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
- .globl __lll_mutex_lock_wait
- .type __lll_mutex_lock_wait,@function
- .hidden __lll_mutex_lock_wait
+ .globl __lll_lock_wait_private
+ .type __lll_lock_wait_private,@function
+ .hidden __lll_lock_wait_private
.align 16
-__lll_mutex_lock_wait:
+__lll_lock_wait_private:
+ cfi_startproc
pushq %r10
+ cfi_adjust_cfa_offset(8)
pushq %rdx
-
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
xorq %r10, %r10 /* No timeout. */
movl $2, %edx
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ LOAD_PRIVATE_FUTEX_WAIT (%esi)
cmpl %edx, %eax /* NB: %edx == 2 */
jne 2f
@@ -66,33 +101,144 @@ __lll_mutex_lock_wait:
jnz 1b
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
retq
- .size __lll_mutex_lock_wait,.-__lll_mutex_lock_wait
-
+ cfi_endproc
+ .size __lll_lock_wait_private,.-__lll_lock_wait_private
#ifdef NOT_IN_libc
- .globl __lll_mutex_timedlock_wait
- .type __lll_mutex_timedlock_wait,@function
- .hidden __lll_mutex_timedlock_wait
+ .globl __lll_lock_wait
+ .type __lll_lock_wait,@function
+ .hidden __lll_lock_wait
+ .align 16
+__lll_lock_wait:
+ cfi_startproc
+ pushq %r10
+ cfi_adjust_cfa_offset(8)
+ pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r10, -16)
+ cfi_offset(%rdx, -24)
+ xorq %r10, %r10 /* No timeout. */
+ movl $2, %edx
+ LOAD_FUTEX_WAIT (%esi)
+
+ cmpl %edx, %eax /* NB: %edx == 2 */
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ syscall
+
+2: movl %edx, %eax
+ xchgl %eax, (%rdi) /* NB: lock is implied */
+
+ testl %eax, %eax
+ jnz 1b
+
+ popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
+ popq %r10
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r10)
+ retq
+ cfi_endproc
+ .size __lll_lock_wait,.-__lll_lock_wait
+
+ /* %rdi: futex
+ %rsi: flags
+ %rdx: timeout
+ %eax: futex value
+ */
+ .globl __lll_timedlock_wait
+ .type __lll_timedlock_wait,@function
+ .hidden __lll_timedlock_wait
.align 16
-__lll_mutex_timedlock_wait:
+__lll_timedlock_wait:
+ cfi_startproc
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+# endif
+
+ pushq %r9
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r9, 0)
+ movq %rdx, %r10
+ movl $0xffffffff, %r9d
+ LOAD_FUTEX_WAIT_ABS (%esi)
+
+ movl $2, %edx
+ cmpl %edx, %eax
+ jne 2f
+
+1: movl $SYS_futex, %eax
+ movl $2, %edx
+ syscall
+
+2: xchgl %edx, (%rdi) /* NB: lock is implied */
+
+ testl %edx, %edx
+ jz 3f
+
+ cmpl $-ETIMEDOUT, %eax
+ je 4f
+ cmpl $-EINVAL, %eax
+ jne 1b
+4: movl %eax, %edx
+ negl %edx
+
+3: movl %edx, %eax
+ popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
+ retq
+
+# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
/* Check for a valid timeout value. */
cmpq $1000000000, 8(%rdx)
jae 3f
pushq %r8
+ cfi_adjust_cfa_offset(8)
pushq %r9
+ cfi_adjust_cfa_offset(8)
pushq %r12
+ cfi_adjust_cfa_offset(8)
pushq %r13
+ cfi_adjust_cfa_offset(8)
pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r8, -16)
+ cfi_offset(%r9, -24)
+ cfi_offset(%r12, -32)
+ cfi_offset(%r13, -40)
+ cfi_offset(%r14, -48)
+ pushq %rsi
+ cfi_adjust_cfa_offset(8)
/* Stack frame for the timespec and timeval structs. */
- subq $16, %rsp
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
movq %rdi, %r12
movq %rdx, %r13
+ movl $2, %edx
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
1:
/* Get current time. */
movq %rsp, %rdi
@@ -114,118 +260,137 @@ __lll_mutex_timedlock_wait:
addq $1000000000, %rsi
decq %rdi
4: testq %rdi, %rdi
- js 5f /* Time is already up. */
+ js 2f /* Time is already up. */
- /* Futex call. */
- movq %rdi, (%rsp) /* Store relative timeout. */
+ /* Store relative timeout. */
+ movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
- movl $1, %eax
+ /* Futex call. */
movl $2, %edx
- LOCK
- cmpxchgl %edx, (%r12)
-
- testl %eax, %eax
- je 8f
-
+ movl $1, %eax
movq %rsp, %r10
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
- movl $FUTEX_WAIT, %esi
-#endif
+ movl 24(%rsp), %esi
+ LOAD_FUTEX_WAIT (%esi)
movq %r12, %rdi
movl $SYS_futex, %eax
syscall
- movq %rax, %rcx
-8: /* NB: %edx == 2 */
- xorl %eax, %eax
- LOCK
- cmpxchgl %edx, (%rdi)
- jnz 7f
+ /* NB: %edx == 2 */
+ xchgl %edx, (%r12)
+
+ testl %edx, %edx
+ je 6f
+
+ cmpl $-ETIMEDOUT, %eax
+ jne 1b
+2: movl $ETIMEDOUT, %edx
-6: addq $16, %rsp
+6: addq $32, %rsp
+ cfi_adjust_cfa_offset(-32)
popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
popq %r9
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r9)
popq %r8
- retq
-
- /* Check whether the time expired. */
-7: cmpq $-ETIMEDOUT, %rcx
- je 5f
-
- /* Make sure the current holder knows we are going to sleep. */
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r8)
movl %edx, %eax
- xchgl %eax, (%rdi)
- testl %eax, %eax
- jz 6b
- jmp 1b
+ retq
3: movl $EINVAL, %eax
retq
-
-5: movl $ETIMEDOUT, %eax
- jmp 6b
- .size __lll_mutex_timedlock_wait,.-__lll_mutex_timedlock_wait
+# endif
+ cfi_endproc
+ .size __lll_timedlock_wait,.-__lll_timedlock_wait
#endif
-#ifdef NOT_IN_libc
- .globl lll_unlock_wake_cb
- .type lll_unlock_wake_cb,@function
- .hidden lll_unlock_wake_cb
+ .globl __lll_unlock_wake_private
+ .type __lll_unlock_wake_private,@function
+ .hidden __lll_unlock_wake_private
.align 16
-lll_unlock_wake_cb:
+__lll_unlock_wake_private:
+ cfi_startproc
pushq %rsi
+ cfi_adjust_cfa_offset(8)
pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
- LOCK
- addl $1, (%rdi)
- jng 1f
+ movl $0, (%rdi)
+ LOAD_PRIVATE_FUTEX_WAKE (%esi)
+ movl $1, %edx /* Wake one thread. */
+ movl $SYS_futex, %eax
+ syscall
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
retq
- .size lll_unlock_wake_cb,.-lll_unlock_wake_cb
-#endif
-
+ cfi_endproc
+ .size __lll_unlock_wake_private,.-__lll_unlock_wake_private
- .globl __lll_mutex_unlock_wake
- .type __lll_mutex_unlock_wake,@function
- .hidden __lll_mutex_unlock_wake
+#ifdef NOT_IN_libc
+ .globl __lll_unlock_wake
+ .type __lll_unlock_wake,@function
+ .hidden __lll_unlock_wake
.align 16
-__lll_mutex_unlock_wake:
+__lll_unlock_wake:
+ cfi_startproc
pushq %rsi
+ cfi_adjust_cfa_offset(8)
pushq %rdx
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%rsi, -16)
+ cfi_offset(%rdx, -24)
movl $0, (%rdi)
- movl $FUTEX_WAKE, %esi
+ LOAD_FUTEX_WAKE (%esi)
movl $1, %edx /* Wake one thread. */
movl $SYS_futex, %eax
syscall
popq %rdx
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rdx)
popq %rsi
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%rsi)
retq
- .size __lll_mutex_unlock_wake,.-__lll_mutex_unlock_wake
+ cfi_endproc
+ .size __lll_unlock_wake,.-__lll_unlock_wake
-
-#ifdef NOT_IN_libc
.globl __lll_timedwait_tid
.type __lll_timedwait_tid,@function
.hidden __lll_timedwait_tid
.align 16
__lll_timedwait_tid:
+ cfi_startproc
pushq %r12
+ cfi_adjust_cfa_offset(8)
pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
movq %rdi, %r12
movq %rsi, %r13
subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
/* Get current time. */
2: movq %rsp, %rdi
@@ -255,6 +420,8 @@ __lll_timedwait_tid:
jz 4f
movq %rsp, %r10
+ /* XXX The kernel so far uses global futex for the wakeup at
+ all times. */
#if FUTEX_WAIT == 0
xorl %esi, %esi
#else
@@ -269,14 +436,21 @@ __lll_timedwait_tid:
4: xorl %eax, %eax
8: addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
retq
+ cfi_adjust_cfa_offset(32)
1: cmpq $-ETIMEDOUT, %rax
jne 2b
6: movl $ETIMEDOUT, %eax
jmp 8b
+ cfi_endproc
.size __lll_timedwait_tid,.-__lll_timedwait_tid
#endif
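
The renamed stubs above (__lll_lock_wait_private, __lll_unlock_wake_private and their flag-parameterized variants) still implement the classic three-state futex lock: 0 unlocked, 1 locked with no waiters, 2 locked with possible waiters. The fast paths live in the inline macros of lowlevellock.h below; the stubs only run on contention. Here is a C rendering of that protocol against the raw futex(2) syscall, purely as a sketch: the *_sketch names are invented and error handling is omitted.

/* Three-state futex lock (0/1/2), as implemented by the assembly stubs
   above.  Sketch only; not the library's code.  */
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static long
futex (atomic_int *uaddr, int op, int val)
{
  return syscall (SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* 0 = unlocked, 1 = locked/no waiters, 2 = locked/maybe waiters.  */
static void
lll_lock_sketch (atomic_int *futexp)
{
  int expected = 0;
  if (atomic_compare_exchange_strong (futexp, &expected, 1))
    return;                         /* fast path: no syscall */
  /* Contended: advertise a waiter by storing 2, then sleep until the
     value changes; re-check after every wakeup.  */
  while (atomic_exchange (futexp, 2) != 0)
    futex (futexp, FUTEX_WAIT_PRIVATE, 2);
}

static void
lll_unlock_sketch (atomic_int *futexp)
{
  /* If the old value was 2 there may be a sleeper; wake exactly one.  */
  if (atomic_exchange (futexp, 0) != 1)
    futex (futexp, FUTEX_WAKE_PRIVATE, 1);
}

int
main (void)
{
  static atomic_int lock;           /* LLL_LOCK_INITIALIZER == 0 */
  lll_lock_sketch (&lock);
  lll_unlock_sketch (&lock);
  return 0;
}
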
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index c9f30e962..7c042fc80 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2004, 2006-2008, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -20,266 +20,541 @@
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1
-#include <time.h>
-#include <sys/param.h>
-#include <bits/pthreadtypes.h>
-#include <atomic.h>
-#include <sysdep.h>
-
-#ifndef LOCK_INSTR
-# ifdef UP
-# define LOCK_INSTR /* nothing */
-# else
-# define LOCK_INSTR "lock;"
+#ifndef __ASSEMBLER__
+# include <time.h>
+# include <sys/param.h>
+# include <bits/pthreadtypes.h>
+# include <bits/kernel-features.h>
+# include <tcb-offsets.h>
+
+# ifndef LOCK_INSTR
+# ifdef UP
+# define LOCK_INSTR /* nothing */
+# else
+# define LOCK_INSTR "lock;"
+# endif
+# endif
+#else
+# ifndef LOCK
+# ifdef UP
+# define LOCK
+# else
+# define LOCK lock
+# endif
# endif
#endif
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
+#define FUTEX_CMP_REQUEUE 4
+#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_WAIT_BITSET 9
+#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CLOCK_REALTIME 256
+
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+
+#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
+
+/* Values for 'private' parameter of locking macros. Yes, the
+ definition seems to be backwards. But it is not. The bit will be
+ reversed before passing to the system call. */
+#define LLL_PRIVATE 0
+#define LLL_SHARED FUTEX_PRIVATE_FLAG
+
+#ifndef __ASSEMBLER__
+
+#if !defined NOT_IN_libc || defined IS_IN_rtld
+/* In libc.so or ld.so all futexes are private. */
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ ((fl) | FUTEX_PRIVATE_FLAG)
+# else
+# define __lll_private_flag(fl, private) \
+ ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
+# endif
+#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+# define __lll_private_flag(fl, private) \
+ (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
+# else
+# define __lll_private_flag(fl, private) \
+ (__builtin_constant_p (private) \
+ ? ((private) == 0 \
+ ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
+ : (fl)) \
+ : ({ unsigned int __fl = ((private) ^ FUTEX_PRIVATE_FLAG); \
+ __asm__ ("andl %%fs:%P1, %0" : "+r" (__fl) \
+ : "i" (offsetof (struct pthread, header.private_futex))); \
+ __fl | (fl); }))
+# endif
+#endif
-
-/* Initializer for compatibility lock. */
-#define LLL_MUTEX_LOCK_INITIALIZER (0)
-#define LLL_MUTEX_LOCK_INITIALIZER_LOCKED (1)
-#define LLL_MUTEX_LOCK_INITIALIZER_WAITERS (2)
+/* Initializer for lock. */
+#define LLL_LOCK_INITIALIZER (0)
+#define LLL_LOCK_INITIALIZER_LOCKED (1)
+#define LLL_LOCK_INITIALIZER_WAITERS (2)
/* Delay in spinlock loop. */
-#define BUSY_WAIT_NOP __asm__ ("rep; nop")
-
-
-#define lll_futex_wait(futex, val) \
- do { \
- int __ignore; \
+#define BUSY_WAIT_NOP __asm__ ("rep; nop")
+
+
+#define LLL_STUB_UNWIND_INFO_START \
+ ".section .eh_frame,\"a\",@progbits\n" \
+"7:\t" ".long 9f-8f # Length of Common Information Entry\n" \
+"8:\t" ".long 0x0 # CIE Identifier Tag\n\t" \
+ ".byte 0x1 # CIE Version\n\t" \
+ ".ascii \"zR\\0\" # CIE Augmentation\n\t" \
+ ".uleb128 0x1 # CIE Code Alignment Factor\n\t" \
+ ".sleb128 -8 # CIE Data Alignment Factor\n\t" \
+ ".byte 0x10 # CIE RA Column\n\t" \
+ ".uleb128 0x1 # Augmentation size\n\t" \
+ ".byte 0x1b # FDE Encoding (pcrel sdata4)\n\t" \
+ ".byte 0x12 # DW_CFA_def_cfa_sf\n\t" \
+ ".uleb128 0x7\n\t" \
+ ".sleb128 16\n\t" \
+ ".align 8\n" \
+"9:\t" ".long 23f-10f # FDE Length\n" \
+"10:\t" ".long 10b-7b # FDE CIE offset\n\t" \
+ ".long 1b-. # FDE initial location\n\t" \
+ ".long 6b-1b # FDE address range\n\t" \
+ ".uleb128 0x0 # Augmentation size\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 12f-11f\n" \
+"11:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-1b\n"
+#define LLL_STUB_UNWIND_INFO_END \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 14f-13f\n" \
+"13:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-2b\n" \
+"14:\t" ".byte 0x40 + (3b-2b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 0\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 16f-15f\n" \
+"15:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-3b\n" \
+"16:\t" ".byte 0x40 + (4b-3b-1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x0e # DW_CFA_def_cfa_offset\n\t" \
+ ".uleb128 128\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 20f-17f\n" \
+"17:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 19f-18f\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"18:\t" ".4byte 4b-.\n\t" \
+ ".byte 0x1c # DW_OP_minus\n\t" \
+ ".byte 0x0d # DW_OP_const4s\n" \
+"19:\t" ".4byte 24f-.\n\t" \
+ ".byte 0x22 # DW_OP_plus\n" \
+"20:\t" ".byte 0x40 + (5b-4b+1) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x13 # DW_CFA_def_cfa_offset_sf\n\t" \
+ ".sleb128 16\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 22f-21f\n" \
+"21:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-5b\n" \
+"22:\t" ".align 8\n" \
+"23:\t" ".previous\n"
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_5 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (2b-1b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+/* Unwind info for
+ 1: leaq ..., %rdi
+ 0: movq ..., %rdx
+ 2: subq $128, %rsp
+ 3: callq ...
+ 4: addq $128, %rsp
+ 5: jmp 24f
+ 6:
+ snippet. */
+#define LLL_STUB_UNWIND_INFO_6 \
+LLL_STUB_UNWIND_INFO_START \
+"12:\t" ".byte 0x40 + (0b-1b) # DW_CFA_advance_loc\n\t" \
+ ".byte 0x16 # DW_CFA_val_expression\n\t" \
+ ".uleb128 0x10\n\t" \
+ ".uleb128 26f-25f\n" \
+"25:\t" ".byte 0x80 # DW_OP_breg16\n\t" \
+ ".sleb128 4b-0b\n" \
+"26:\t" ".byte 0x40 + (2b-0b) # DW_CFA_advance_loc\n\t" \
+LLL_STUB_UNWIND_INFO_END
+
+
+#define lll_futex_wait(futex, val, private) \
+ lll_futex_timed_wait(futex, val, NULL, private)
+
+
+#define lll_futex_timed_wait(futex, val, timeout, private) \
+ ({ \
+ register const struct timespec *__to __asm__ ("r10") = timeout; \
+ int __status; \
register __typeof (val) _val __asm__ ("edx") = (val); \
- __asm__ __volatile ("xorq %%r10, %%r10\n\t" \
- "syscall" \
- : "=a" (__ignore) \
- : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAIT), \
- "d" (_val) \
- : "memory", "cc", "r10", "r11", "cx"); \
- } while (0)
+ __asm__ __volatile ("syscall" \
+ : "=a" (__status) \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAIT, private)), \
+ "d" (_val), "r" (__to) \
+ : "memory", "cc", "r11", "cx"); \
+ __status; \
+ })
-#define lll_futex_wake(futex, nr) \
+#define lll_futex_wake(futex, nr, private) \
do { \
int __ignore; \
register __typeof (nr) _nr __asm__ ("edx") = (nr); \
__asm__ __volatile ("syscall" \
: "=a" (__ignore) \
- : "0" (SYS_futex), "D" (futex), "S" (FUTEX_WAKE), \
+ : "0" (SYS_futex), "D" (futex), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
"d" (_nr) \
: "memory", "cc", "r10", "r11", "cx"); \
} while (0)
-/* Does not preserve %eax and %ecx. */
-extern int __lll_mutex_lock_wait (int *__futex, int __val) attribute_hidden;
-/* Does not preserver %eax, %ecx, and %edx. */
-extern int __lll_mutex_timedlock_wait (int *__futex, int __val,
- const struct timespec *__abstime)
- attribute_hidden;
-/* Preserves all registers but %eax. */
-extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
-
-
-/* NB: in the lll_mutex_trylock macro we simply return the value in %eax
+/* NB: in the lll_trylock macro we simply return the value in %eax
   after the cmpxchg instruction. In case the operation succeeded this
   value is zero. In case the operation failed, the cmpxchg instruction
   has loaded the current value of the memory word which is guaranteed
to be nonzero. */
-#define lll_mutex_trylock(futex) \
+#if defined NOT_IN_libc || defined UP
+# define __lll_trylock_asm LOCK_INSTR "cmpxchgl %2, %1"
+#else
+# define __lll_trylock_asm "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %2, %1\n\t" \
+ "jmp 1f\n\t" \
+ "0:\tcmpxchgl %2, %1\n\t" \
+ "1:"
+#endif
+
+#define lll_trylock(futex) \
({ int ret; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ __asm__ __volatile (__lll_trylock_asm \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex), \
+ "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
-
-#define lll_mutex_cond_trylock(futex) \
+#define lll_robust_trylock(futex, id) \
({ int ret; \
__asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
: "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_WAITERS), \
- "m" (futex), "0" (LLL_MUTEX_LOCK_INITIALIZER) \
+ : "r" (id), "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
: "memory"); \
ret; })
+#define lll_cond_trylock(futex) \
+ ({ int ret; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1" \
+ : "=a" (ret), "=m" (futex) \
+ : "r" (LLL_LOCK_INITIALIZER_WAITERS), \
+ "m" (futex), "0" (LLL_LOCK_INITIALIZER) \
+ : "memory"); \
+ ret; })
-#define lll_mutex_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_cond_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t" \
+#if defined NOT_IN_libc || defined UP
+# define __lll_lock_asm_start LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#else
+# define __lll_lock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; cmpxchgl %4, %2\n\t" \
"jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (2), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_timedlock(futex, timeout) \
- ({ int _result, ignore1, ignore2, ignore3; \
- __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t" \
+ "jmp 24f\n" \
+ "0:\tcmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t"
+#endif
+
+#define lll_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=&D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "0" (1), "m" (futex), "3" (0) \
+ : "cx", "r11", "cc", "memory"); \
+ else \
+ __asm__ __volatile (__lll_lock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_lock_%=, @function\n" \
+ "_L_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (1), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ }) \
+
+#define lll_robust_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_lock_%=, @function\n" \
+ "_L_robust_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (result) \
+ : "1" (id), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ result; })
+
+#define lll_cond_lock(futex, private) \
+ (void) \
+ ({ int ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_cond_lock_%=, @function\n" \
+ "_L_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (ignore3) \
+ : "1" (2), "m" (futex), "3" (0), "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ })
+
+#define lll_robust_cond_lock(futex, id, private) \
+ ({ int result, ignore1, ignore2; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_cond_lock_%=, @function\n" \
+ "_L_robust_cond_lock_%=:\n" \
+ "1:\tleaq %2, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_lock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_cond_lock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
+ "=a" (result) \
+ : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
+ "0" (private) \
+ : "cx", "r11", "cc", "memory"); \
+ result; })
+
+#define lll_timedlock(futex, timeout, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
"jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %4, %%rdi\n\t" \
- "movq %8, %%rdx\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_timedlock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_timedlock_%=, @function\n" \
+ "_L_timedlock_%=:\n" \
+ "1:\tleaq %4, %%rdi\n" \
+ "0:\tmovq %8, %%rdx\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_timedlock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_timedlock_%=, 6b-1b\n\t" \
".previous\n" \
- "2:" \
- : "=a" (_result), "=&D" (ignore1), "=S" (ignore2), \
+ LLL_STUB_UNWIND_INFO_6 \
+ "24:" \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
"=&d" (ignore3), "=m" (futex) \
- : "0" (0), "2" (1), "m" (futex), "m" (timeout) \
+ : "0" (0), "1" (1), "m" (futex), "m" (timeout), \
+ "2" (private) \
: "memory", "cx", "cc", "r10", "r11"); \
- _result; })
-
-
-#define lll_mutex_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile (LOCK_INSTR "decl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %0, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_unlock_wake\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
-
-
-#define lll_mutex_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
-
-
-/* We have a separate internal lock implementation which is not tied
- to binary compatibility. */
-
-/* Type for lock object. */
-typedef int lll_lock_t;
-
-/* Initializers for lock. */
-#define LLL_LOCK_INITIALIZER (0)
-#define LLL_LOCK_INITIALIZER_LOCKED (1)
-
-
-extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
-
-
-/* The states of a lock are:
- 0 - untaken
- 1 - taken by one user
- 2 - taken by more users */
+ result; })
+#define lll_robust_timedlock(futex, timeout, id, private) \
+ ({ int result, ignore1, ignore2, ignore3; \
+ __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
+ "jnz 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_timedlock_%=, @function\n" \
+ "_L_robust_timedlock_%=:\n" \
+ "1:\tleaq %4, %%rdi\n" \
+ "0:\tmovq %8, %%rdx\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_robust_timedlock_wait\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_timedlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_6 \
+ "24:" \
+ : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
+ "=&d" (ignore3), "=m" (futex) \
+ : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
+ "2" (private) \
+ : "memory", "cx", "cc", "r10", "r11"); \
+ result; })
#if defined NOT_IN_libc || defined UP
-# define lll_trylock(futex) lll_mutex_trylock (futex)
-# define lll_lock(futex) lll_mutex_lock (futex)
-# define lll_unlock(futex) lll_mutex_unlock (futex)
+# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
+ "jne 1f\n\t"
#else
-/* Special versions of the macros for use in libc itself. They avoid
- the lock prefix when the thread library is not used.
-
- The code sequence to avoid unnecessary lock prefixes is what the AMD
- guys suggested. If you do not like it, bring it up with AMD.
-
- XXX In future we might even want to avoid it on UP machines. */
-
-# define lll_trylock(futex) \
- ({ unsigned char ret; \
- __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %2, %1\n\t" \
- "jmp 1f\n" \
- "0:\tcmpxchgl %2, %1\n\t" \
- "1:setne %0" \
- : "=a" (ret), "=m" (futex) \
- : "r" (LLL_MUTEX_LOCK_INITIALIZER_LOCKED), "m" (futex),\
- "0" (LLL_MUTEX_LOCK_INITIALIZER) \
- : "memory"); \
- ret; })
-
-
-# define lll_lock(futex) \
- (void) ({ int ignore1, ignore2, ignore3; \
- __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; cmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- "jmp 2f\n" \
- "0:\tcmpxchgl %0, %2\n\t" \
- "jnz 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %2, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_lock_wait\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),\
- "=a" (ignore3) \
- : "0" (1), "m" (futex), "3" (0) \
- : "cx", "r11", "cc", "memory"); })
-
-
-# define lll_unlock(futex) \
- (void) ({ int ignore; \
- __asm__ __volatile ("cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
- "je 0f\n\t" \
- "lock; decl %0\n\t" \
- "jne 1f\n\t" \
- "jmp 2f\n" \
- "0:\tdecl %0\n\t" \
- "jne 1f\n\t" \
- ".subsection 1\n" \
- "1:\tleaq %0, %%rdi\n\t" \
- "subq $128, %%rsp\n\t" \
- "callq __lll_mutex_unlock_wake\n\t" \
- "addq $128, %%rsp\n\t" \
- "jmp 2f\n\t" \
- ".previous\n" \
- "2:" \
- : "=m" (futex), "=&D" (ignore) \
- : "m" (futex) \
- : "ax", "cx", "r11", "cc", "memory"); })
+# define __lll_unlock_asm_start "cmpl $0, __libc_multiple_threads(%%rip)\n\t" \
+ "je 0f\n\t" \
+ "lock; decl %0\n\t" \
+ "jne 1f\n\t" \
+ "jmp 24f\n\t" \
+ "0:\tdecl %0\n\t" \
+ "jne 1f\n\t"
#endif
+#define lll_unlock(futex, private) \
+ (void) \
+ ({ int ignore; \
+ if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
+ __asm__ __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake_private\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ else \
+ __asm__ __volatile (__lll_unlock_asm_start \
+ ".subsection 1\n\t" \
+ ".type _L_unlock_%=, @function\n" \
+ "_L_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "m" (futex), "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ })
+
+#define lll_robust_unlock(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t" \
+ "jne 1f\n\t" \
+ ".subsection 1\n\t" \
+ ".type _L_robust_unlock_%=, @function\n" \
+ "_L_robust_unlock_%=:\n" \
+ "1:\tleaq %0, %%rdi\n" \
+ "2:\tsubq $128, %%rsp\n" \
+ "3:\tcallq __lll_unlock_wake\n" \
+ "4:\taddq $128, %%rsp\n" \
+ "5:\tjmp 24f\n" \
+ "6:\t.size _L_robust_unlock_%=, 6b-1b\n\t" \
+ ".previous\n" \
+ LLL_STUB_UNWIND_INFO_5 \
+ "24:" \
+ : "=m" (futex), "=&D" (ignore) \
+ : "i" (FUTEX_WAITERS), "m" (futex), \
+ "S" (private) \
+ : "ax", "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+#define lll_robust_dead(futex, private) \
+ do \
+ { \
+ int ignore; \
+ __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t" \
+ "syscall" \
+ : "=m" (futex), "=a" (ignore) \
+ : "D" (&(futex)), "i" (FUTEX_OWNER_DIED), \
+ "S" (__lll_private_flag (FUTEX_WAKE, private)), \
+ "1" (__NR_futex), "d" (1) \
+ : "cx", "r11", "cc", "memory"); \
+ } \
+ while (0)
+
+/* Returns non-zero if error happened, zero if success. */
+#define lll_futex_requeue(ftx, nr_wake, nr_move, mutex, val, private) \
+ ({ int __res; \
+ register int __nr_move __asm__ ("r10") = nr_move; \
+ register void *__mutex __asm__ ("r8") = mutex; \
+ register int __val __asm__ ("r9") = val; \
+ __asm__ __volatile ("syscall" \
+ : "=a" (__res) \
+ : "0" (__NR_futex), "D" ((void *) ftx), \
+ "S" (__lll_private_flag (FUTEX_CMP_REQUEUE, \
+ private)), "d" (nr_wake), \
+ "r" (__nr_move), "r" (__mutex), "r" (__val) \
+ : "cx", "r11", "cc", "memory"); \
+ __res < 0; })
#define lll_islocked(futex) \
- (futex != LLL_MUTEX_LOCK_INITIALIZER)
+ (futex != LLL_LOCK_INITIALIZER)
/* The kernel notifies a process which uses CLONE_CLEARTID via futex
@@ -318,25 +593,6 @@ extern int __lll_timedwait_tid (int *tid, const struct timespec *abstime)
} \
__result; })
-
-/* Conditional variable handling. */
-
-extern void __lll_cond_wait (pthread_cond_t *cond) attribute_hidden;
-extern int __lll_cond_timedwait (pthread_cond_t *cond,
- const struct timespec *abstime)
- attribute_hidden;
-extern void __lll_cond_wake (pthread_cond_t *cond) attribute_hidden;
-extern void __lll_cond_broadcast (pthread_cond_t *cond) attribute_hidden;
-
-
-#define lll_cond_wait(cond) \
- __lll_cond_wait (cond)
-#define lll_cond_timedwait(cond, abstime) \
- __lll_cond_timedwait (cond, abstime)
-#define lll_cond_wake(cond) \
- __lll_cond_wake (cond)
-#define lll_cond_broadcast(cond) \
- __lll_cond_broadcast (cond)
-
+#endif /* !__ASSEMBLER__ */
#endif /* lowlevellock.h */
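
The "backwards looking" definitions of LLL_PRIVATE (0) and LLL_SHARED (FUTEX_PRIVATE_FLAG) in the header above exist so that a single XOR turns the caller's private argument into the final futex operation word, which is exactly what the LOAD_FUTEX_WAIT/LOAD_FUTEX_WAKE macros in lowlevellock.S and the __ASSUME_PRIVATE_FUTEX branch of __lll_private_flag do. A tiny self-checking illustration of that arithmetic follows; load_futex_op is an invented helper name.

/* How private/shared selection composes the futex op word.  Constant
   values match the header above; the helper name is invented.  */
#include <assert.h>

#define FUTEX_WAIT         0
#define FUTEX_WAKE         1
#define FUTEX_PRIVATE_FLAG 128

#define LLL_PRIVATE 0                    /* process-private futex */
#define LLL_SHARED  FUTEX_PRIVATE_FLAG   /* process-shared futex */

/* Equivalent of "xorl $(op | FUTEX_PRIVATE_FLAG), reg" with reg holding
   the caller's private argument (__ASSUME_PRIVATE_FUTEX case).  */
static int
load_futex_op (int op, int private)
{
  return private ^ (op | FUTEX_PRIVATE_FLAG);
}

int
main (void)
{
  /* LLL_PRIVATE (0) leaves the flag set; LLL_SHARED cancels it out.  */
  assert (load_futex_op (FUTEX_WAIT, LLL_PRIVATE)
          == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
  assert (load_futex_op (FUTEX_WAIT, LLL_SHARED) == FUTEX_WAIT);
  assert (load_futex_op (FUTEX_WAKE, LLL_SHARED) == FUTEX_WAKE);
  return 0;
}
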
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
index c20ef73e7..df4949615 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pt-vfork.S
@@ -16,6 +16,8 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+#include <tcb-offsets.h>
+
#define SAVE_PID \
movl %fs:PID, %esi; \
movl %esi, %edx; \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
index f6e15a2d7..15ad534fa 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_barrier_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelbarrier.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
.text
@@ -64,9 +56,10 @@ pthread_barrier_wait:
if the CURR_EVENT memory has meanwhile been changed. */
7:
#if FUTEX_WAIT == 0
- xorl %esi, %esi
+ movl PRIVATE(%rdi), %esi
#else
movl $FUTEX_WAIT, %esi
+ orl PRIVATE(%rdi), %esi
#endif
xorq %r10, %r10
8: movl $SYS_futex, %eax
@@ -115,6 +108,7 @@ pthread_barrier_wait:
so 0x7fffffff is the highest value. */
movl $0x7fffffff, %edx
movl $FUTEX_WAKE, %esi
+ orl PRIVATE(%rdi), %esi
movl $SYS_futex, %eax
syscall
@@ -139,21 +133,29 @@ pthread_barrier_wait:
retq
-1: addq $MUTEX, %rdi
- callq __lll_mutex_lock_wait
+1: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_lock_wait
subq $MUTEX, %rdi
jmp 2b
-4: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+4: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 5b
-6: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+6: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 7b
-9: addq $MUTEX, %rdi
- callq __lll_mutex_unlock_wake
+9: movl PRIVATE(%rdi), %esi
+ addq $MUTEX, %rdi
+ xorl $LLL_SHARED, %esi
+ callq __lll_unlock_wake
jmp 10b
.size pthread_barrier_wait,.-pthread_barrier_wait
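
The barrier code above reads the futex flags straight out of the barrier's PRIVATE field and ORs them into FUTEX_WAIT/FUTEX_WAKE, but has to flip them with xorl $LLL_SHARED before calling __lll_lock_wait/__lll_unlock_wake, which expect the LLL_PRIVATE/LLL_SHARED convention instead. Assuming the field stores FUTEX_PRIVATE_FLAG for a process-private barrier and 0 for a process-shared one (set up by pthread_barrier_init, which is not part of this hunk), the conversion is a single XOR, sketched here.

/* Converting between the stored futex-flag form and the LLL_* form used
   by the __lll_* helpers.  Field semantics are assumed as stated above.  */
#include <assert.h>

#define FUTEX_PRIVATE_FLAG 128
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

int
main (void)
{
  int private_barrier = FUTEX_PRIVATE_FLAG;   /* assumed barrier->private */
  int shared_barrier = 0;

  /* "orl PRIVATE(%rdi), %esi": the field is already in futex-op form.  */
  /* "xorl $LLL_SHARED, %esi": flip it into the __lll_* convention.  */
  assert ((private_barrier ^ LLL_SHARED) == LLL_PRIVATE);
  assert ((shared_barrier ^ LLL_SHARED) == LLL_SHARED);
  return 0;
}
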
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
index d8ebdfab8..0f8037ba2 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S
@@ -1,4 +1,5 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,21 +19,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_CMP_REQUEUE 4
-
-#define EINVAL 22
+#include <pthread-pi-defines.h>
+#include <pthread-errnos.h>
.text
@@ -78,8 +69,23 @@ __pthread_cond_broadcast:
8: cmpq $-1, %r8
je 9f
+ /* Do not use requeue for pshared condvars. */
+ testl $PS_BIT, MUTEX_KIND(%r8)
+ jne 9f
+
+ /* Requeue to a PI mutex if the PI bit is set. */
+ movl MUTEX_KIND(%r8), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ je 81f
+
/* Wake up all threads. */
- movl $FUTEX_CMP_REQUEUE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi
+#else
+ movl %fs:PRIVATE_FUTEX, %esi
+ orl $FUTEX_CMP_REQUEUE, %esi
+#endif
movl $SYS_futex, %eax
movl $1, %edx
movl $0x7fffffff, %r10d
@@ -94,6 +100,20 @@ __pthread_cond_broadcast:
10: xorl %eax, %eax
retq
+ /* Wake up all threads. */
+81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+ movl $SYS_futex, %eax
+ movl $1, %edx
+ movl $0x7fffffff, %r10d
+ syscall
+
+ /* For any kind of error, which mainly is EAGAIN, we try again
+ with WAKE. The general test also covers running on old
+ kernels. */
+ cmpq $-4095, %rax
+ jb 10b
+ jmp 9f
+
.align 16
/* Unlock. */
4: LOCK
@@ -108,7 +128,11 @@ __pthread_cond_broadcast:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -116,21 +140,38 @@ __pthread_cond_broadcast:
/* Unlock in loop requires wakeup. */
5: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
jmp 6b
/* Unlock in loop requires wakeup. */
7: addq $cond_lock-cond_futex, %rdi
- callq __lll_mutex_unlock_wake
+ cmpq $-1, %r8
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
subq $cond_lock-cond_futex, %rdi
jmp 8b
9: /* The futex requeue functionality is not available. */
+ cmpq $-1, %r8
movl $0x7fffffff, %edx
- movl $FUTEX_WAKE, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
movl $SYS_futex, %eax
syscall
jmp 10b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
weak_alias(__pthread_cond_broadcast, pthread_cond_broadcast)
-
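
The broadcast path now avoids a thundering herd: it wakes a single waiter and asks the kernel to requeue the rest directly onto the mutex, using FUTEX_CMP_REQUEUE_PI when the mutex is a non-robust priority-inheritance one, and falls back to a plain FUTEX_WAKE of everyone if requeueing is refused (pshared condvar, changed futex value, or an old kernel). A rough C sketch of that decision flow follows, assuming kernel headers that define FUTEX_CMP_REQUEUE_PI; the helper names and simplified arguments are not the library's.

/* Broadcast = wake one + requeue the rest onto the mutex, with a
   wake-everyone fallback.  Sketch only; names are invented.  */
#include <limits.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long
futex (int *uaddr, int op, int val, int val2, int *uaddr2, int val3)
{
  /* val2 travels in the timeout slot for the requeue operations.  */
  return syscall (SYS_futex, uaddr, op, val, (unsigned long) val2,
                  uaddr2, val3);
}

static void
broadcast_sketch (int *cond_futex, int *mutex_futex, int expected,
                  int mutex_is_pi, int pshared)
{
  if (!pshared)
    {
      int op = mutex_is_pi ? FUTEX_CMP_REQUEUE_PI : FUTEX_CMP_REQUEUE;
      /* Wake one waiter, requeue up to INT_MAX others onto the mutex,
         but only if *cond_futex still equals the expected value.  */
      if (futex (cond_futex, op | FUTEX_PRIVATE_FLAG, 1, INT_MAX,
                 mutex_futex, expected) >= 0)
        return;
      /* EAGAIN (value changed), ENOSYS (old kernel), ...: fall back.  */
    }
  /* Fallback: wake every waiter; they will then contend on the mutex.  */
  futex (cond_futex, FUTEX_WAKE | (pshared ? 0 : FUTEX_PRIVATE_FLAG),
         INT_MAX, 0, NULL, 0);
}

int
main (void)
{
  static int cv, mu;
  broadcast_sketch (&cv, &mu, 0, 0, 0);   /* no waiters: both calls no-op */
  return 0;
}
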
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
index c7cc3ddd8..568c98470 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,20 +18,11 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
#include <bits/kernel-features.h>
-
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-
-#define EINVAL 22
+#include <pthread-errnos.h>
.text
@@ -64,9 +55,66 @@ __pthread_cond_signal:
addl $1, (%rdi)
/* Wake up one thread. */
- movl $FUTEX_WAKE, %esi
- movl $SYS_futex, %eax
+ cmpq $-1, dep_mutex(%r8)
+ movl $FUTEX_WAKE_OP, %esi
movl $1, %edx
+ movl $SYS_futex, %eax
+ je 8f
+
+ /* Get the address of the mutex used. */
+ movq dep_mutex(%r8), %rcx
+ movl MUTEX_KIND(%rcx), %r11d
+ andl $(ROBUST_BIT|PI_BIT), %r11d
+ cmpl $PI_BIT, %r11d
+ je 9f
+
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi
+#else
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
+
+8: movl $1, %r10d
+#if cond_lock != 0
+ addq $cond_lock, %r8
+#endif
+ movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d
+ syscall
+#if cond_lock != 0
+ subq $cond_lock, %r8
+#endif
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpq $-4095, %rax
+ jae 7f
+
+ xorl %eax, %eax
+ retq
+
+ /* Wake up one thread and requeue none in the PI Mutex case. */
+9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+ movq %rcx, %r8
+ xorq %r10, %r10
+ movl (%rdi), %r9d // XXX Can this be right?
+ syscall
+
+ leaq -cond_futex(%rdi), %r8
+
+ /* For any kind of error, we try again with WAKE.
+ The general test also covers running on old kernels. */
+ cmpq $-4095, %rax
+ jb 4f
+
+7:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ andl $FUTEX_PRIVATE_FLAG, %esi
+#else
+ andl %fs:PRIVATE_FUTEX, %esi
+#endif
+ orl $FUTEX_WAKE, %esi
+ movl $SYS_futex, %eax
+ /* %rdx should be 1 already from $FUTEX_WAKE_OP syscall.
+ movl $1, %edx */
syscall
/* Unlock. */
@@ -86,7 +134,11 @@ __pthread_cond_signal:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -95,7 +147,14 @@ __pthread_cond_signal:
/* Unlock in loop requires wakeup. */
5:
movq %r8, %rdi
- callq __lll_mutex_unlock_wake
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
jmp 6b
.size __pthread_cond_signal, .-__pthread_cond_signal
weak_alias(__pthread_cond_signal, pthread_cond_signal)
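
The rewritten signal fast path relies on FUTEX_WAKE_OP to combine two steps into one syscall: wake one waiter on the condvar futex, and at the same time store 0 into the condvar's internal cond_lock, waking one of that lock's waiters if the old value was greater than 1. That is what FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, defined as ((4 << 24) | 1) in the lowlevellock.h hunk earlier, encodes; on any error the code falls back to a plain FUTEX_WAKE. Below is a hedged sketch of the fast path with an invented helper name and simplified arguments.

/* One-syscall "wake a waiter and release the internal lock", as used by
   the new __pthread_cond_signal fast path.  Sketch only.  */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef FUTEX_OP_CLEAR_WAKE_IF_GT_ONE
/* FUTEX_OP_SET with operand 0 on *uaddr2; wake there if old value > 1.  */
# define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
#endif

/* Returns 0 on success; -1 means the caller must fall back to a plain
   FUTEX_WAKE plus an ordinary unlock of cond_lock.  */
static int
signal_and_unlock_sketch (int *cond_futex, int *cond_lock)
{
  long rc = syscall (SYS_futex, cond_futex,
                     FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG,
                     1,                      /* wake one cond waiter */
                     (unsigned long) 1,      /* wake one lock waiter */
                     cond_lock, FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);
  return rc < 0 ? -1 : 0;
}

int
main (void)
{
  static int cv, lk = 1;        /* lock held, no waiters */
  return signal_and_unlock_sketch (&cv, &lk);
}
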
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index f0dcdb750..427a723cb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,18 +18,12 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
+#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <bits/kernel-features.h>
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
@@ -37,6 +31,7 @@
.text
+
/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime) */
.globl __pthread_cond_timedwait
@@ -44,38 +39,57 @@
.align 16
__pthread_cond_timedwait:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
pushq %r12
-.Lpush_r12:
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
pushq %r13
-.Lpush_r13:
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
pushq %r14
-.Lpush_r14:
-#define FRAME_SIZE 80
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+ pushq %r15
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r15, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE 32
+#else
+# define FRAME_SIZE 48
+#endif
subq $FRAME_SIZE, %rsp
-.Lsubq:
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+ cfi_remember_state
cmpq $1000000000, 8(%rdx)
movl $EINVAL, %eax
- jae 18f
+ jae 48f
/* Stack frame:
- rsp + 80
- +--------------------------+
- rsp + 48 | cleanup buffer |
- +--------------------------+
- rsp + 40 | old wake_seq value |
- +--------------------------+
- rsp + 24 | timeout value |
- +--------------------------+
+ rsp + 48
+ +--------------------------+
+ rsp + 32 | timeout value |
+ +--------------------------+
+ rsp + 24 | old wake_seq value |
+ +--------------------------+
rsp + 16 | mutex pointer |
- +--------------------------+
+ +--------------------------+
rsp + 8 | condvar pointer |
- +--------------------------+
+ +--------------------------+
rsp + 4 | old broadcast_seq value |
- +--------------------------+
+ +--------------------------+
rsp + 0 | old cancellation mode |
- +--------------------------+
+ +--------------------------+
*/
cmpq $-1, dep_mutex(%rdi)
@@ -88,8 +102,18 @@ __pthread_cond_timedwait:
je 22f
movq %rsi, dep_mutex(%rdi)
+22:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
/* Get internal lock. */
-22: movl $1, %esi
+ movl $1, %esi
xorl %eax, %eax
LOCK
#if cond_lock == 0
@@ -97,89 +121,29 @@ __pthread_cond_timedwait:
#else
cmpxchgl %esi, cond_lock(%rdi)
#endif
- jnz 1f
+ jnz 31f
/* Unlock the mutex. */
-2: movq 16(%rsp), %rdi
+32: movq 16(%rsp), %rdi
xorl %esi, %esi
callq __pthread_mutex_unlock_usercnt
testl %eax, %eax
- jne 16f
+ jne 46f
movq 8(%rsp), %rdi
incq total_seq(%rdi)
incl cond_futex(%rdi)
- addl $(1 << clock_bits), cond_nwaiters(%rdi)
-
- /* Install cancellation handler. */
-#ifdef __PIC__
- leaq __condvar_cleanup(%rip), %rsi
-#else
- leaq __condvar_cleanup, %rsi
-#endif
- leaq 48(%rsp), %rdi
- movq %rsp, %rdx
- callq __pthread_cleanup_push
+ addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
/* Get and store current wakeup_seq value. */
movq 8(%rsp), %rdi
movq wakeup_seq(%rdi), %r9
movl broadcast_seq(%rdi), %edx
- movq %r9, 40(%rsp)
+ movq %r9, 24(%rsp)
movl %edx, 4(%rsp)
- /* Get the current time. */
-8:
-#ifdef __NR_clock_gettime
- /* Get the clock number. Note that the field in the condvar
- structure stores the number minus 1. */
- movq 8(%rsp), %rdi
- movl cond_nwaiters(%rdi), %edi
- andl $((1 << clock_bits) - 1), %edi
- /* Only clocks 0 and 1 are allowed so far. Both are handled in the
- kernel. */
- leaq 24(%rsp), %rsi
- movl $__NR_clock_gettime, %eax
- syscall
-# ifndef __ASSUME_POSIX_TIMERS
- cmpq $-ENOSYS, %rax
- je 19f
-# endif
-
- /* Compute relative timeout. */
- movq (%r13), %rcx
- movq 8(%r13), %rdx
- subq 24(%rsp), %rcx
- subq 32(%rsp), %rdx
-#else
- leaq 24(%rsp), %rdi
- xorl %esi, %esi
- movq $VSYSCALL_ADDR_vgettimeofday, %rax
- callq *%rax
-
- /* Compute relative timeout. */
- movq 32(%rsp), %rax
- movl $1000, %edx
- mul %rdx /* Milli seconds to nano seconds. */
- movq (%r13), %rcx
- movq 8(%r13), %rdx
- subq 24(%rsp), %rcx
- subq %rax, %rdx
-#endif
- jns 12f
- addq $1000000000, %rdx
- decq %rcx
-12: testq %rcx, %rcx
- movq 8(%rsp), %rdi
- movq $-ETIMEDOUT, %r14
- js 6f
-
- /* Store relative timeout. */
-21: movq %rcx, 24(%rsp)
- movq %rdx, 32(%rsp)
-
- movl cond_futex(%rdi), %r12d
+38: movl cond_futex(%rdi), %r12d
/* Unlock. */
LOCK
@@ -188,25 +152,67 @@ __pthread_cond_timedwait:
#else
decl cond_lock(%rdi)
#endif
- jne 3f
+ jne 33f
-4: callq __pthread_enable_asynccancel
+.LcleanupSTART1:
+34: callq __pthread_enable_asynccancel
movl %eax, (%rsp)
- leaq 24(%rsp), %r10
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
+ movq %r13, %r10
+ movl $FUTEX_WAIT_BITSET, %esi
+ cmpq $-1, dep_mutex(%rdi)
+ je 60f
+
+ movq dep_mutex(%rdi), %r8
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%r8), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 61f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
+ xorl %eax, %eax
+ /* The following only works like this because we only support
+ two clocks, represented using a single bit. */
+ testl $1, cond_nwaiters(%rdi)
+ movl $FUTEX_CLOCK_REALTIME, %edx
+ cmove %edx, %eax
+ orl %eax, %esi
+ movq %r12, %rdx
+ addq $cond_futex, %rdi
+ movl $SYS_futex, %eax
+ syscall
+
+ movl $1, %r15d
+#ifdef __ASSUME_REQUEUE_PI
+ jmp 62f
#else
- movl $FUTEX_WAIT, %esi
+ cmpq $-4095, %rax
+ jnae 62f
+
+ subq $cond_futex, %rdi
#endif
+
+61: movl $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
+60: xorl %r15d, %r15d
+ xorl %eax, %eax
+ /* The following only works like this because we only support
+ two clocks, represented using a single bit. */
+ testl $1, cond_nwaiters(%rdi)
+ movl $FUTEX_CLOCK_REALTIME, %edx
+ movl $0xffffffff, %r9d
+ cmove %edx, %eax
+ orl %eax, %esi
movq %r12, %rdx
addq $cond_futex, %rdi
movl $SYS_futex, %eax
syscall
- movq %rax, %r14
+62: movq %rax, %r14
movl (%rsp), %edi
callq __pthread_disable_asynccancel
+.LcleanupEND1:
/* Lock. */
movq 8(%rsp), %rdi
@@ -218,120 +224,158 @@ __pthread_cond_timedwait:
#else
cmpxchgl %esi, cond_lock(%rdi)
#endif
- jne 5f
+ jne 35f
-6: movl broadcast_seq(%rdi), %edx
+36: movl broadcast_seq(%rdi), %edx
movq woken_seq(%rdi), %rax
movq wakeup_seq(%rdi), %r9
cmpl 4(%rsp), %edx
- jne 23f
+ jne 53f
- cmpq 40(%rsp), %r9
- jbe 15f
+ cmpq 24(%rsp), %r9
+ jbe 45f
cmpq %rax, %r9
- ja 9f
+ ja 39f
-15: cmpq $-ETIMEDOUT, %r14
- jne 8b
+45: cmpq $-ETIMEDOUT, %r14
+ jne 38b
-13: incq wakeup_seq(%rdi)
+99: incq wakeup_seq(%rdi)
incl cond_futex(%rdi)
movl $ETIMEDOUT, %r14d
- jmp 14f
+ jmp 44f
-23: xorq %r14, %r14
- jmp 24f
+53: xorq %r14, %r14
+ jmp 54f
-9: xorq %r14, %r14
-14: incq woken_seq(%rdi)
+39: xorq %r14, %r14
+44: incq woken_seq(%rdi)
-24: subl $(1 << clock_bits), cond_nwaiters(%rdi)
+54: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
/* Wake up a thread which wants to destroy the condvar object. */
cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 25f
+ jne 55f
movl cond_nwaiters(%rdi), %eax
- andl $~((1 << clock_bits) - 1), %eax
- jne 25f
+ andl $~((1 << nwaiters_shift) - 1), %eax
+ jne 55f
addq $cond_nwaiters, %rdi
- movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %esi
+ cmpq $-1, dep_mutex-cond_nwaiters(%rdi)
movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
syscall
subq $cond_nwaiters, %rdi
-25: LOCK
+55: LOCK
#if cond_lock == 0
decl (%rdi)
#else
decl cond_lock(%rdi)
#endif
- jne 10f
+ jne 40f
- /* Remove cancellation handler. */
-11: movq 48+CLEANUP_PREV(%rsp), %rdx
- movq %rdx, %fs:CLEANUP
+ /* If requeue_pi is used the kernel performs the locking of the
+ mutex. */
+41: movq 16(%rsp), %rdi
+ testl %r15d, %r15d
+ jnz 64f
- movq 16(%rsp), %rdi
callq __pthread_mutex_cond_lock
- testq %rax, %rax
+63: testq %rax, %rax
cmoveq %r14, %rax
-18: addq $FRAME_SIZE, %rsp
-.Laddq:
+48: addq $FRAME_SIZE, %rsp
+ cfi_adjust_cfa_offset(-FRAME_SIZE)
+ popq %r15
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r15)
popq %r14
-.Lpop_r14:
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
popq %r13
-.Lpop_r13:
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
-.Lpop_r12:
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
retq
+ cfi_restore_state
+
+64: callq __pthread_mutex_cond_lock_adjust
+ movq %r14, %rax
+ jmp 48b
+
/* Initial locking failed. */
-1:
-.LSbl1:
+31:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
- jmp 2b
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+ jmp 32b
/* Unlock in loop requires wakeup. */
-3:
+33:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
- jmp 4b
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+ jmp 34b
/* Locking in loop failed. */
-5:
+35:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
- jmp 6b
+ jmp 36b
/* Unlock after loop requires wakeup. */
-10:
+40:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
- jmp 11b
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+ jmp 41b
/* The initial unlocking of the mutex failed. */
-16: movq 8(%rsp), %rdi
+46: movq 8(%rsp), %rdi
movq %rax, (%rsp)
LOCK
#if cond_lock == 0
@@ -339,30 +383,239 @@ __pthread_cond_timedwait:
#else
decl cond_lock(%rdi)
#endif
- jne 17f
+ jne 47f
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+
+47: movq (%rsp), %rax
+ jmp 48b
+
-17: movq (%rsp), %rax
- jmp 18b
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+ xorl %r15d, %r15d
-#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
+ /* Get internal lock. */
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+# if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+# else
+ cmpxchgl %esi, cond_lock(%rdi)
+# endif
+ jnz 1f
+
+ /* Unlock the mutex. */
+2: movq 16(%rsp), %rdi
+ xorl %esi, %esi
+ callq __pthread_mutex_unlock_usercnt
+
+ testl %eax, %eax
+ jne 46b
+
+ movq 8(%rsp), %rdi
+ incq total_seq(%rdi)
+ incl cond_futex(%rdi)
+ addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Get and store current wakeup_seq value. */
+ movq 8(%rsp), %rdi
+ movq wakeup_seq(%rdi), %r9
+ movl broadcast_seq(%rdi), %edx
+ movq %r9, 24(%rsp)
+ movl %edx, 4(%rsp)
+
+ /* Get the current time. */
+8:
+# ifdef __NR_clock_gettime
+ /* Get the clock number. Note that the field in the condvar
+ structure stores the number minus 1. */
+ movq 8(%rsp), %rdi
+ movl cond_nwaiters(%rdi), %edi
+ andl $((1 << nwaiters_shift) - 1), %edi
+ /* Only clocks 0 and 1 are allowed so far. Both are handled in the
+ kernel. */
+ leaq 32(%rsp), %rsi
+# ifdef SHARED
+ movq __vdso_clock_gettime@GOTPCREL(%rip), %rax
+ movq (%rax), %rax
+ PTR_DEMANGLE (%rax)
+ jz 26f
+ call *%rax
+ jmp 27f
+# endif
+26: movl $__NR_clock_gettime, %eax
+ syscall
+27:
+# ifndef __ASSUME_POSIX_TIMERS
+ cmpq $-ENOSYS, %rax
+ je 19f
+# endif
+
+ /* Compute relative timeout. */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdx
+ subq 32(%rsp), %rcx
+ subq 40(%rsp), %rdx
+# else
+ leaq 24(%rsp), %rdi
+ xorl %esi, %esi
+ movq $VSYSCALL_ADDR_vgettimeofday, %rax
+ callq *%rax
+
+ /* Compute relative timeout. */
+ movq 40(%rsp), %rax
+ movl $1000, %edx
+ mul %rdx /* Milli seconds to nano seconds. */
+ movq (%r13), %rcx
+ movq 8(%r13), %rdx
+ subq 32(%rsp), %rcx
+ subq %rax, %rdx
+# endif
+ jns 12f
+ addq $1000000000, %rdx
+ decq %rcx
+12: testq %rcx, %rcx
+ movq 8(%rsp), %rdi
+ movq $-ETIMEDOUT, %r14
+ js 6f
+
+ /* Store relative timeout. */
+21: movq %rcx, 32(%rsp)
+ movq %rdx, 40(%rsp)
+
+ movl cond_futex(%rdi), %r12d
+
+ /* Unlock. */
+ LOCK
+# if cond_lock == 0
+ decl (%rdi)
+# else
+ decl cond_lock(%rdi)
+# endif
+ jne 3f
+
+.LcleanupSTART2:
+4: callq __pthread_enable_asynccancel
+ movl %eax, (%rsp)
+
+ leaq 32(%rsp), %r10
+ cmpq $-1, dep_mutex(%rdi)
+ movq %r12, %rdx
+# ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT, %eax
+ movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+# else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+# if FUTEX_WAIT != 0
+ orl $FUTEX_WAIT, %esi
+# endif
+# endif
+ addq $cond_futex, %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %r14
+
+ movl (%rsp), %edi
+ callq __pthread_disable_asynccancel
+.LcleanupEND2:
+
+ /* Lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+# if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
+# else
+ cmpxchgl %esi, cond_lock(%rdi)
+# endif
+ jne 5f
+
+6: movl broadcast_seq(%rdi), %edx
+
+ movq woken_seq(%rdi), %rax
+
+ movq wakeup_seq(%rdi), %r9
+
+ cmpl 4(%rsp), %edx
+ jne 53b
+
+ cmpq 24(%rsp), %r9
+ jbe 15f
+
+ cmpq %rax, %r9
+ ja 39b
+
+15: cmpq $-ETIMEDOUT, %r14
+ jne 8b
+
+ jmp 99b
+
+ /* Initial locking failed. */
+1:
+# if cond_lock != 0
+ addq $cond_lock, %rdi
+# endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+ jmp 2b
+
+ /* Unlock in loop requires wakeup. */
+3:
+# if cond_lock != 0
+ addq $cond_lock, %rdi
+# endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+ jmp 4b
+
+ /* Locking in loop failed. */
+5:
+# if cond_lock != 0
+ addq $cond_lock, %rdi
+# endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+# if cond_lock != 0
+ subq $cond_lock, %rdi
+# endif
+ jmp 6b
+
+# if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
/* clock_gettime not available. */
-19: leaq 24(%rsp), %rdi
+19: leaq 32(%rsp), %rdi
xorl %esi, %esi
movq $VSYSCALL_ADDR_vgettimeofday, %rax
callq *%rax
/* Compute relative timeout. */
- movq 32(%rsp), %rax
+ movq 40(%rsp), %rax
movl $1000, %edx
mul %rdx /* Milli seconds to nano seconds. */
movq (%r13), %rcx
movq 8(%r13), %rdx
- subq 24(%rsp), %rcx
+ subq 32(%rsp), %rcx
subq %rax, %rdx
jns 20f
addq $1000000000, %rdx
@@ -372,97 +625,187 @@ __pthread_cond_timedwait:
movq $-ETIMEDOUT, %r14
js 6b
jmp 21b
+# endif
#endif
-.LENDCODE:
.size __pthread_cond_timedwait, .-__pthread_cond_timedwait
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait)
- .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
- .long .LENDCIE-.LSTARTCIE # Length of the CIE.
-.LSTARTCIE:
- .long 0 # CIE ID.
- .byte 1 # Version number.
-#ifdef SHARED
- .string "zR" # NUL-terminated augmentation
- # string.
+ .align 16
+ .type __condvar_cleanup2, @function
+__condvar_cleanup2:
+ /* Stack frame:
+
+ rsp + 72
+ +--------------------------+
+ rsp + 64 | %r12 |
+ +--------------------------+
+ rsp + 56 | %r13 |
+ +--------------------------+
+ rsp + 48 | %r14 |
+ +--------------------------+
+ rsp + 24 | unused |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ movq %rax, 24(%rsp)
+
+ /* Get internal lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
#else
- .ascii "\0" # NUL-terminated augmentation
- # string.
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- .uleb128 1 # Code alignment factor.
- .sleb128 -8 # Data alignment factor.
- .byte 16 # Return address register
- # column.
-#ifdef SHARED
- .uleb128 1 # Augmentation value length.
- .byte 0x1b # Encoding: DW_EH_PE_pcrel
- # + DW_EH_PE_sdata4.
+ jz 1f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
#endif
- .byte 0x0c # DW_CFA_def_cfa
- .uleb128 7
- .uleb128 8
- .byte 0x90 # DW_CFA_offset, column 0x8
- .uleb128 1
- .align 8
-.LENDCIE:
-
- .long .LENDFDE-.LSTARTFDE # Length of the FDE.
-.LSTARTFDE:
- .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
-#ifdef SHARED
- .long .LSTARTCODE-. # PC-relative start address
- # of the code
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+
+1: movl broadcast_seq(%rdi), %edx
+ cmpl 4(%rsp), %edx
+ jne 3f
+
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movq total_seq(%rdi), %rax
+ cmpq wakeup_seq(%rdi), %rax
+ jbe 6f
+ incq wakeup_seq(%rdi)
+ incl cond_futex(%rdi)
+6: incq woken_seq(%rdi)
+
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ xorq %r12, %r12
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 4f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
+ jne 4f
+
+ cmpq $-1, dep_mutex(%rdi)
+ leaq cond_nwaiters(%rdi), %rdi
+ movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
#else
- .long .LSTARTCODE # Start address of the code.
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
#endif
- .long .LENDCODE-.LSTARTCODE # Length of the code.
-#ifdef SHARED
- .uleb128 0 # No augmentation data.
+ movl $SYS_futex, %eax
+ syscall
+ subq $cond_nwaiters, %rdi
+ movl $1, %r12d
+
+4: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ je 2f
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+2: testq %r12, %r12
+ jnz 5f
+ addq $cond_futex, %rdi
+ cmpq $-1, dep_mutex-cond_futex(%rdi)
+ movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
+ syscall
+
+5: movq 16(%rsp), %rdi
+ callq __pthread_mutex_cond_lock
+
+ movq 24(%rsp), %rdi
+ movq FRAME_SIZE(%rsp), %r15
+ movq FRAME_SIZE+8(%rsp), %r14
+ movq FRAME_SIZE+16(%rsp), %r13
+ movq FRAME_SIZE+24(%rsp), %r12
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size __condvar_cleanup2, .-__condvar_cleanup2
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART1-.LSTARTCODE
+ .uleb128 .LcleanupEND1-.LcleanupSTART1
+ .uleb128 __condvar_cleanup2-.LSTARTCODE
+ .uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .uleb128 .LcleanupSTART2-.LSTARTCODE
+ .uleb128 .LcleanupEND2-.LcleanupSTART2
+ .uleb128 __condvar_cleanup2-.LSTARTCODE
+ .uleb128 0
#endif
- .byte 0x40+.Lpush_r12-.LSTARTCODE # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 0x8c # DW_CFA_offset %r12
- .uleb128 2
- .byte 0x40+.Lpush_r13-.Lpush_r12 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 24
- .byte 0x8d # DW_CFA_offset %r13
- .uleb128 3
- .byte 0x40+.Lpush_r14-.Lpush_r13 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 32
- .byte 0x84 # DW_CFA_offset %r14
- .uleb128 4
- .byte 0x40+.Lsubq-.Lpush_r14 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 32+FRAME_SIZE
- .byte 3 # DW_CFA_advance_loc2
- .2byte .Laddq-.Lsubq
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 32
- .byte 0x40+.Lpop_r14-.Laddq # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 24
- .byte 0xce # DW_CFA_restore %r14
- .byte 0x40+.Lpop_r13-.Lpop_r14 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 0xcd # DW_CFA_restore %r13
- .byte 0x40+.Lpop_r12-.Lpop_r13 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 8
- .byte 0xcc # DW_CFA_restore %r12
- .byte 0x40+.LSbl1-.Lpop_r12 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 32+FRAME_SIZE
- .byte 0x8c # DW_CFA_offset %r12
- .uleb128 2
- .byte 0x8d # DW_CFA_offset %r13
- .uleb128 3
- .byte 0x84 # DW_CFA_offset %r14
- .uleb128 4
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
.align 8
-.LENDFDE:
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
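
A pattern repeated throughout the hunks above: every slow-path call into __lll_lock_wait / __lll_unlock_wake now passes a private-or-shared selector in %esi, derived from whether the condvar's dep_mutex field holds -1 (the marker for a process-shared condvar). A minimal C sketch of that selection, assuming illustrative LLL_* values and a simplified condvar layout rather than the real NPTL structures:

    #define LLL_PRIVATE 0    /* futex word is private to this process      */
    #define LLL_SHARED  128  /* futex word may be shared across processes  */

    struct condvar_sketch {
        int   cond_lock;   /* internal low-level lock                      */
        void *dep_mutex;   /* (void *) -1 once the condvar is known to be
                              process-shared                               */
    };

    /* Mirrors the "cmpq $-1, dep_mutex; cmovne" sequence: use the
       private variant only when the condvar cannot be shared.  */
    static int condvar_lll_flags(const struct condvar_sketch *cv)
    {
        return cv->dep_mutex == (void *) -1 ? LLL_SHARED : LLL_PRIVATE;
    }
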
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
index 544118eb7..7c488f261 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,121 +18,39 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
+#include <pthread-pi-defines.h>
-#ifdef UP
-# define LOCK
-#else
-# define LOCK lock
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
+#include <bits/kernel-features.h>
.text
- .align 16
- .type __condvar_cleanup, @function
- .globl __condvar_cleanup
- .hidden __condvar_cleanup
-__condvar_cleanup:
- pushq %r12
-
- /* Get internal lock. */
- movq %rdi, %r8
- movq 8(%rdi), %rdi
- movl $1, %esi
- xorl %eax, %eax
- LOCK
-#if cond_lock == 0
- cmpxchgl %esi, (%rdi)
-#else
- cmpxchgl %esi, cond_lock(%rdi)
-#endif
- jz 1f
-
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- callq __lll_mutex_lock_wait
-#if cond_lock != 0
- subq $cond_lock, %rdi
-#endif
-
-1: movl broadcast_seq(%rdi), %edx
- cmpl 4(%r8), %edx
- jne 3f
-
- incq wakeup_seq(%rdi)
- incq woken_seq(%rdi)
- incl cond_futex(%rdi)
-
-3: subl $(1 << clock_bits), cond_nwaiters(%rdi)
-
- /* Wake up a thread which wants to destroy the condvar object. */
- xorq %r12, %r12
- cmpq $0xffffffffffffffff, total_seq(%rdi)
- jne 4f
- movl cond_nwaiters(%rdi), %eax
- andl $~((1 << clock_bits) - 1), %eax
- jne 4f
-
- addq $cond_nwaiters, %rdi
- movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %esi
- movl $1, %edx
- syscall
- subq $cond_nwaiters, %rdi
- movl $1, %r12d
-
-4: LOCK
-#if cond_lock == 0
- decl (%rdi)
-#else
- decl cond_lock(%rdi)
-#endif
- je 2f
-#if cond_lock != 0
- addq $cond_lock, %rdi
-#endif
- callq __lll_mutex_unlock_wake
-
- /* Wake up all waiters to make sure no signal gets lost. */
-2: testq %r12, %r12
- jnz 5f
- addq $cond_futex, %rdi
- movl $FUTEX_WAKE, %esi
- movl $0x7fffffff, %edx
- movl $SYS_futex, %eax
- syscall
-
-5: movq 16(%r8), %rdi
- callq __pthread_mutex_cond_lock
-
- popq %r12
-
- retq
- .size __condvar_cleanup, .-__condvar_cleanup
-
-
/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
.globl __pthread_cond_wait
.type __pthread_cond_wait, @function
.align 16
__pthread_cond_wait:
.LSTARTCODE:
- pushq %r12
-.Lpush_r12:
-#define FRAME_SIZE 64
- subq $FRAME_SIZE, %rsp
-.Lsubq:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
+#define FRAME_SIZE 32
+ leaq -FRAME_SIZE(%rsp), %rsp
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
/* Stack frame:
- rsp + 64
- +--------------------------+
- rsp + 32 | cleanup buffer |
+ rsp + 32
+--------------------------+
rsp + 24 | old wake_seq value |
+--------------------------+
@@ -177,17 +95,7 @@ __pthread_cond_wait:
movq 8(%rsp), %rdi
incq total_seq(%rdi)
incl cond_futex(%rdi)
- addl $(1 << clock_bits), cond_nwaiters(%rdi)
-
- /* Install cancellation handler. */
-#ifdef __PIC__
- leaq __condvar_cleanup(%rip), %rsi
-#else
- leaq __condvar_cleanup, %rsi
-#endif
- leaq 32(%rsp), %rdi
- movq %rsp, %rdx
- callq __pthread_cleanup_push
+ addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
/* Get and store current wakeup_seq value. */
movq 8(%rsp), %rdi
@@ -197,7 +105,7 @@ __pthread_cond_wait:
movl %edx, 4(%rsp)
/* Unlock. */
-8: movl cond_futex(%rdi), %r12d
+8: movl cond_futex(%rdi), %edx
LOCK
#if cond_lock == 0
decl (%rdi)
@@ -206,23 +114,53 @@ __pthread_cond_wait:
#endif
jne 3f
+.LcleanupSTART:
4: callq __pthread_enable_asynccancel
movl %eax, (%rsp)
- movq 8(%rsp), %rdi
xorq %r10, %r10
- movq %r12, %rdx
- addq $cond_futex-cond_lock, %rdi
+ cmpq $-1, dep_mutex(%rdi)
+ leaq cond_futex(%rdi), %rdi
+ movl $FUTEX_WAIT, %esi
+ je 60f
+
+ movq dep_mutex-cond_futex(%rdi), %r8
+ /* Requeue to a non-robust PI mutex if the PI bit is set and
+ the robust bit is not set. */
+ movl MUTEX_KIND(%r8), %eax
+ andl $(ROBUST_BIT|PI_BIT), %eax
+ cmpl $PI_BIT, %eax
+ jne 61f
+
+ movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
movl $SYS_futex, %eax
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
+ syscall
+
+ movl $1, %r8d
+#ifdef __ASSUME_REQUEUE_PI
+ jmp 62f
#else
+ cmpq $-4095, %rax
+ jnae 62f
+
+# ifndef __ASSUME_PRIVATE_FUTEX
movl $FUTEX_WAIT, %esi
+# endif
#endif
+
+61:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+#else
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
+60: xorl %r8d, %r8d
+ movl $SYS_futex, %eax
syscall
- movl (%rsp), %edi
+62: movl (%rsp), %edi
callq __pthread_disable_asynccancel
+.LcleanupEND:
/* Lock. */
movq 8(%rsp), %rdi
@@ -254,19 +192,29 @@ __pthread_cond_wait:
incq woken_seq(%rdi)
/* Unlock */
-16: subl $(1 << clock_bits), cond_nwaiters(%rdi)
+16: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
/* Wake up a thread which wants to destroy the condvar object. */
cmpq $0xffffffffffffffff, total_seq(%rdi)
jne 17f
movl cond_nwaiters(%rdi), %eax
- andl $~((1 << clock_bits) - 1), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
jne 17f
addq $cond_nwaiters, %rdi
- movl $SYS_futex, %eax
- movl $FUTEX_WAKE, %esi
+ cmpq $-1, dep_mutex-cond_nwaiters(%rdi)
movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
+#endif
+ movl $SYS_futex, %eax
syscall
subq $cond_nwaiters, %rdi
@@ -278,28 +226,36 @@ __pthread_cond_wait:
#endif
jne 10f
- /* Remove cancellation handler. */
-11: movq 32+CLEANUP_PREV(%rsp), %rdx
- movq %rdx, %fs:CLEANUP
+ /* If requeue_pi is used the kernel performs the locking of the
+ mutex. */
+11: movq 16(%rsp), %rdi
+ testl %r8d, %r8d
+ jnz 18f
- movq 16(%rsp), %rdi
callq __pthread_mutex_cond_lock
-14: addq $FRAME_SIZE, %rsp
-.Laddq:
- popq %r12
-.Lpop_r12:
+14: leaq FRAME_SIZE(%rsp), %rsp
+ cfi_adjust_cfa_offset(-FRAME_SIZE)
/* We return the result of the mutex_lock operation. */
retq
+ cfi_adjust_cfa_offset(FRAME_SIZE)
+
+18: callq __pthread_mutex_cond_lock_adjust
+ xorl %eax, %eax
+ jmp 14b
+
/* Initial locking failed. */
1:
-.LSbl1:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
jmp 2b
/* Unlock in loop requires wakeup. */
@@ -307,7 +263,15 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ /* The call preserves %rdx. */
+ callq __lll_unlock_wake
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
jmp 4b
/* Locking in loop failed. */
@@ -315,7 +279,11 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_lock_wait
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
@@ -326,7 +294,11 @@ __pthread_cond_wait:
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
jmp 11b
/* The initial unlocking of the mutex failed. */
@@ -338,83 +310,185 @@ __pthread_cond_wait:
#else
decl cond_lock(%rdi)
#endif
- jne 13f
+ je 13f
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_unlock_wake
13: movq %r10, %rax
jmp 14b
-.LENDCODE:
.size __pthread_cond_wait, .-__pthread_cond_wait
weak_alias(__pthread_cond_wait, pthread_cond_wait)
- .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
- .long .LENDCIE-.LSTARTCIE # Length of the CIE.
-.LSTARTCIE:
- .long 0 # CIE ID.
- .byte 1 # Version number.
-#ifdef SHARED
- .string "zR" # NUL-terminated augmentation
- # string.
+ .align 16
+ .type __condvar_cleanup1, @function
+ .globl __condvar_cleanup1
+ .hidden __condvar_cleanup1
+__condvar_cleanup1:
+ /* Stack frame:
+
+ rsp + 32
+ +--------------------------+
+ rsp + 24 | unused |
+ +--------------------------+
+ rsp + 16 | mutex pointer |
+ +--------------------------+
+ rsp + 8 | condvar pointer |
+ +--------------------------+
+ rsp + 4 | old broadcast_seq value |
+ +--------------------------+
+ rsp + 0 | old cancellation mode |
+ +--------------------------+
+ */
+
+ movq %rax, 24(%rsp)
+
+ /* Get internal lock. */
+ movq 8(%rsp), %rdi
+ movl $1, %esi
+ xorl %eax, %eax
+ LOCK
+#if cond_lock == 0
+ cmpxchgl %esi, (%rdi)
#else
- .ascii "\0" # NUL-terminated augmentation
- # string.
+ cmpxchgl %esi, cond_lock(%rdi)
#endif
- .uleb128 1 # Code alignment factor.
- .sleb128 -8 # Data alignment factor.
- .byte 16 # Return address register
- # column.
-#ifdef SHARED
- .uleb128 1 # Augmentation value length.
- .byte 0x1b # Encoding: DW_EH_PE_pcrel
- # + DW_EH_PE_sdata4.
+ jz 1f
+
+#if cond_lock != 0
+ addq $cond_lock, %rdi
#endif
- .byte 0x0c # DW_CFA_def_cfa
- .uleb128 7
- .uleb128 8
- .byte 0x90 # DW_CFA_offset, column 0x8
- .uleb128 1
- .align 8
-.LENDCIE:
-
- .long .LENDFDE-.LSTARTFDE # Length of the FDE.
-.LSTARTFDE:
- .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
-#ifdef SHARED
- .long .LSTARTCODE-. # PC-relative start address
- # of the code
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ callq __lll_lock_wait
+#if cond_lock != 0
+ subq $cond_lock, %rdi
+#endif
+
+1: movl broadcast_seq(%rdi), %edx
+ cmpl 4(%rsp), %edx
+ jne 3f
+
+ /* We increment the wakeup_seq counter only if it is lower than
+ total_seq. If this is not the case the thread was woken and
+ then canceled. In this case we ignore the signal. */
+ movq total_seq(%rdi), %rax
+ cmpq wakeup_seq(%rdi), %rax
+ jbe 6f
+ incq wakeup_seq(%rdi)
+ incl cond_futex(%rdi)
+6: incq woken_seq(%rdi)
+
+3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+ /* Wake up a thread which wants to destroy the condvar object. */
+ xorl %ecx, %ecx
+ cmpq $0xffffffffffffffff, total_seq(%rdi)
+ jne 4f
+ movl cond_nwaiters(%rdi), %eax
+ andl $~((1 << nwaiters_shift) - 1), %eax
+ jne 4f
+
+ cmpq $-1, dep_mutex(%rdi)
+ leaq cond_nwaiters(%rdi), %rdi
+ movl $1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
#else
- .long .LSTARTCODE # Start address of the code.
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
#endif
- .long .LENDCODE-.LSTARTCODE # Length of the code.
-#ifdef SHARED
- .uleb128 0 # No augmentation data.
+ movl $SYS_futex, %eax
+ syscall
+ subq $cond_nwaiters, %rdi
+ movl $1, %ecx
+
+4: LOCK
+#if cond_lock == 0
+ decl (%rdi)
+#else
+ decl cond_lock(%rdi)
+#endif
+ je 2f
+#if cond_lock != 0
+ addq $cond_lock, %rdi
+#endif
+ cmpq $-1, dep_mutex-cond_lock(%rdi)
+ movl $LLL_PRIVATE, %eax
+ movl $LLL_SHARED, %esi
+ cmovne %eax, %esi
+ /* The call preserves %rcx. */
+ callq __lll_unlock_wake
+
+ /* Wake up all waiters to make sure no signal gets lost. */
+2: testl %ecx, %ecx
+ jnz 5f
+ addq $cond_futex, %rdi
+ cmpq $-1, dep_mutex-cond_futex(%rdi)
+ movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE, %eax
+ movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+ cmove %eax, %esi
+#else
+ movl $0, %eax
+ movl %fs:PRIVATE_FUTEX, %esi
+ cmove %eax, %esi
+ orl $FUTEX_WAKE, %esi
#endif
- .byte 0x40+.Lpush_r12-.LSTARTCODE # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 0x8c # DW_CFA_offset %r12
- .uleb128 2
- .byte 0x40+.Lsubq-.Lpush_r12 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16+FRAME_SIZE
- .byte 3 # DW_CFA_advance_loc2
- .2byte .Laddq-.Lsubq
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 0x40+.Lpop_r12-.Laddq # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 8
- .byte 0xcc # DW_CFA_restore %r12
- .byte 0x40+.LSbl1-.Lpop_r12 # DW_CFA_advance_loc+N
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 80
- .byte 0x8c # DW_CFA_offset %r12
- .uleb128 2
+ movl $SYS_futex, %eax
+ syscall
+
+5: movq 16(%rsp), %rdi
+ callq __pthread_mutex_cond_lock
+
+ movq 24(%rsp), %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size __condvar_cleanup1, .-__condvar_cleanup1
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 __condvar_cleanup1-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
.align 8
-.LENDFDE:
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
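
The rewritten __pthread_cond_wait above chooses between FUTEX_WAIT_REQUEUE_PI and a plain FUTEX_WAIT by looking at the dependent mutex's kind bits: requeue-PI is used only for a non-shared condvar whose mutex has the PI bit set and the robust bit clear. A hedged C sketch of that decision; the *_BIT values and opcode numbers are placeholders standing in for the real pthread-pi-defines / futex ABI constants:

    #define ROBUST_BIT              16   /* placeholder value */
    #define PI_BIT                  32   /* placeholder value */
    #define FUTEX_WAIT               0
    #define FUTEX_WAIT_REQUEUE_PI   11
    #define FUTEX_PRIVATE_FLAG     128

    /* Pick the futex opcode for the wait, following the
       "andl $(ROBUST_BIT|PI_BIT); cmpl $PI_BIT" test in the assembly.  */
    static int cond_wait_op(const void *dep_mutex, int mutex_kind)
    {
        if (dep_mutex == (const void *) -1)         /* process-shared */
            return FUTEX_WAIT;
        if ((mutex_kind & (ROBUST_BIT | PI_BIT)) == PI_BIT)
            return FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG;
        return FUTEX_WAIT | FUTEX_PRIVATE_FLAG;     /* assumes private futexes */
    }
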
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
index d8bfa26c6..0ac952b66 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,15 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <tcb-offsets.h>
+#include <lowlevellock.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
.comm __fork_generation, 4, 4
@@ -38,6 +33,15 @@
.align 16
__pthread_once:
.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
testl $2, (%rdi)
jz 1f
xorl %eax, %eax
@@ -45,7 +49,7 @@ __pthread_once:
/* Preserve the function pointer. */
1: pushq %rsi
-.Lpush_rsi:
+ cfi_adjust_cfa_offset(8)
xorq %r10, %r10
/* Not yet initialized or initialization in progress.
@@ -76,10 +80,15 @@ __pthread_once:
jnz 3f /* Different for generation -> run initializer. */
/* Somebody else got here first. Wait. */
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAIT|FUTEX_PRIVATE_FLAG, %esi
#else
+# if FUTEX_WAIT == 0
+ movl %fs:PRIVATE_FUTEX, %esi
+# else
movl $FUTEX_WAIT, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+# endif
#endif
movl $SYS_futex, %eax
syscall
@@ -87,31 +96,40 @@ __pthread_once:
/* Preserve the pointer to the control variable. */
3: pushq %rdi
-.Lpush_rdi:
+ cfi_adjust_cfa_offset(8)
+ pushq %rdi
+ cfi_adjust_cfa_offset(8)
.LcleanupSTART:
- callq *8(%rsp)
+ callq *16(%rsp)
.LcleanupEND:
/* Get the control variable address back. */
popq %rdi
-.Lpop_rdi:
+ cfi_adjust_cfa_offset(-8)
/* Sucessful run of the initializer. Signal that we are done. */
LOCK
incl (%rdi)
+ addq $8, %rsp
+ cfi_adjust_cfa_offset(-8)
+
/* Wake up all other threads. */
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
movl $FUTEX_WAKE, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
movl $SYS_futex, %eax
syscall
4: addq $8, %rsp
-.Ladd:
+ cfi_adjust_cfa_offset(-8)
xorl %eax, %eax
retq
-
.size __pthread_once,.-__pthread_once
@@ -125,12 +143,18 @@ pthread_once = __pthread_once
.type clear_once_control,@function
.align 16
clear_once_control:
+ cfi_adjust_cfa_offset(3 * 8)
movq (%rsp), %rdi
movq %rax, %r8
movl $0, (%rdi)
movl $0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_WAKE|FUTEX_PRIVATE_FLAG, %esi
+#else
movl $FUTEX_WAKE, %esi
+ orl %fs:PRIVATE_FUTEX, %esi
+#endif
movl $SYS_futex, %eax
syscall
@@ -139,15 +163,15 @@ clear_once_control:
call _Unwind_Resume@PLT
hlt
.LENDCODE:
+ cfi_endproc
.size clear_once_control,.-clear_once_control
.section .gcc_except_table,"a",@progbits
.LexceptSTART:
- .byte 0xff # @LPStart format (omit)
- .byte 0xff # @TType format (omit)
- .byte 0x01 # call-site format
- # DW_EH_PE_uleb128
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
.uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
.uleb128 .LcleanupSTART-.LSTARTCODE
@@ -161,92 +185,6 @@ clear_once_control:
.Lcstend:
- .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
- .long .LENDCIE-.LSTARTCIE # Length of the CIE.
-.LSTARTCIE:
- .long 0 # CIE ID.
- .byte 1 # Version number.
-#ifdef SHARED
- .string "zPLR" # NUL-terminated augmentation
- # string.
-#else
- .string "zPL" # NUL-terminated augmentation
- # string.
-#endif
- .uleb128 1 # Code alignment factor.
- .sleb128 -8 # Data alignment factor.
- .byte 16 # Return address register
- # column.
-#ifdef SHARED
- .uleb128 7 # Augmentation value length.
- .byte 0x9b # Personality: DW_EH_PE_pcrel
- # + DW_EH_PE_sdata4
- # + DW_EH_PE_indirect
- .long DW.ref.__gcc_personality_v0-.
- .byte 0x1b # LSDA Encoding: DW_EH_PE_pcrel
- # + DW_EH_PE_sdata4.
- .byte 0x1b # FDE Encoding: DW_EH_PE_pcrel
- # + DW_EH_PE_sdata4.
-#else
- .uleb128 10 # Augmentation value length.
- .byte 0x0 # Personality: absolute
- .quad __gcc_personality_v0
- .byte 0x0 # LSDA Encoding: absolute
-#endif
- .byte 0x0c # DW_CFA_def_cfa
- .uleb128 7
- .uleb128 8
- .byte 0x90 # DW_CFA_offset, column 0x10
- .uleb128 1
- .align 8
-.LENDCIE:
-
- .long .LENDFDE-.LSTARTFDE # Length of the FDE.
-.LSTARTFDE:
- .long .LSTARTFDE-.LSTARTFRAME # CIE pointer.
-#ifdef SHARED
- .long .LSTARTCODE-. # PC-relative start address
- # of the code.
- .long .LENDCODE-.LSTARTCODE # Length of the code.
- .uleb128 4 # Augmentation size
- .long .LexceptSTART-.
-#else
- .quad .LSTARTCODE # Start address of the code.
- .quad .LENDCODE-.LSTARTCODE # Length of the code.
- .uleb128 8 # Augmentation size
- .quad .LexceptSTART
-#endif
- .byte 4 # DW_CFA_advance_loc4
- .long .Lpush_rsi-.LSTARTCODE
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 4 # DW_CFA_advance_loc4
- .long .Lpush_rdi-.Lpush_rsi
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 24
- .byte 4 # DW_CFA_advance_loc4
- .long .Lpop_rdi-.Lpush_rdi
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
- .byte 4 # DW_CFA_advance_loc4
- .long .Ladd-.Lpop_rdi
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 8
- .byte 4 # DW_CFA_advance_loc4
- .long clear_once_control-.Ladd
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 24
-#if 0
- .byte 4 # DW_CFA_advance_loc4
- .long .Lpop_rdi2-clear_once_control
- .byte 14 # DW_CFA_def_cfa_offset
- .uleb128 16
-#endif
- .align 8
-.LENDFDE:
-
-
#ifdef SHARED
.hidden DW.ref.__gcc_personality_v0
.weak DW.ref.__gcc_personality_v0
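
The pthread_once rewrite above keeps the existing algorithm (a state word plus fork-generation counter waited on with a futex) and mainly adds private-futex flags and CFI/LSDA unwind data in place of the hand-written .eh_frame. A rough C rendering of the control flow, with a simplified state encoding, no fork-generation or cancellation handling, and a bare futex() wrapper:

    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long futex(int *uaddr, int op, int val)
    {
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
    }

    /* Bit 1 (value 2) marks "initialisation finished", bit 0 (value 1)
       marks "initialisation in progress".  */
    static int once_sketch(int *once_control, void (*init_routine)(void))
    {
        for (;;) {
            int val = *once_control;
            if (val & 2)
                return 0;                          /* already initialised */
            if ((val & 1) == 0
                && __sync_bool_compare_and_swap(once_control, val, val | 1)) {
                init_routine();                    /* we won the race */
                *once_control = 2;
                futex(once_control, FUTEX_WAKE_PRIVATE, INT_MAX);
                return 0;
            }
            /* Someone else is initialising: sleep until they wake us.  */
            futex(once_control, FUTEX_WAIT_PRIVATE, val | 1);
        }
    }
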
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
index d7543572a..9b8408b69 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_rdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,6 +30,7 @@
.type __pthread_rwlock_rdlock,@function
.align 16
__pthread_rwlock_rdlock:
+ cfi_startproc
xorq %r10, %r10
/* Get the lock. */
@@ -73,12 +65,20 @@ __pthread_rwlock_rdlock:
#endif
jne 10f
-11: addq $READERS_WAKEUP, %rdi
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%rdi), %esi
#else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%rdi), %esi
+# else
movl $FUTEX_WAIT, %esi
+ orl PSHARED(%rdi), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
#endif
+ addq $READERS_WAKEUP, %rdi
movl $SYS_futex, %eax
syscall
@@ -113,11 +113,11 @@ __pthread_rwlock_rdlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -129,11 +129,11 @@ __pthread_rwlock_rdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -149,25 +149,26 @@ __pthread_rwlock_rdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX == 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_rdlock,.-__pthread_rwlock_rdlock
.globl pthread_rwlock_rdlock
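
In the rwlock paths the private/shared choice comes from the lock's own PSHARED field instead of a dependent mutex: the field is XORed into a value that already carries FUTEX_PRIVATE_FLAG, so a process-private lock keeps the flag and a shared one drops it. A small sketch of that computation, assuming PSHARED is stored as 0 for private locks and as the private-flag value for shared ones (the convention the xor relies on):

    #define FUTEX_WAIT          0
    #define FUTEX_PRIVATE_FLAG  128

    /* pshared_field == 0                  -> FUTEX_WAIT | FUTEX_PRIVATE_FLAG
       pshared_field == FUTEX_PRIVATE_FLAG -> FUTEX_WAIT                      */
    static int rwlock_wait_op(int pshared_field)
    {
        return (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ pshared_field;
    }
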
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
index f044842e0..bb12d4941 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedrdlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,35 +18,39 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedrdlock
.type pthread_rwlock_timedrdlock,@function
.align 16
pthread_rwlock_timedrdlock:
+ cfi_startproc
pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG %edx
+#else
pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+
subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
movq %rdi, %r12
movq %rsi, %r13
@@ -77,7 +81,7 @@ pthread_rwlock_timedrdlock:
incl READERS_QUEUED(%r12)
je 4f
- movl READERS_WAKEUP(%r12), %r14d
+ movl READERS_WAKEUP(%r12), VALREG
/* Unlock. */
LOCK
@@ -88,8 +92,33 @@ pthread_rwlock_timedrdlock:
#endif
jne 10f
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ xorl PSHARED(%r12), %esi
+ movq %r13, %r10
+ movl $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ movl %r14d, %edx
+#endif
+21: leaq READERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .subsection 2
+.Lreltmo:
/* Get current time. */
-11: movq %rsp, %rdi
+ movq %rsp, %rdi
xorl %esi, %esi
movq $VSYSCALL_ADDR_vgettimeofday, %rax
callq *%rax
@@ -112,20 +141,26 @@ pthread_rwlock_timedrdlock:
movq %rcx, (%rsp) /* Store relative timeout. */
movq %rdi, 8(%rsp)
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%r12), %esi
+# else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%r12), %esi
+# else
movl $FUTEX_WAIT, %esi
-#endif
+ orl PSHARED(%r12), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+# endif
movq %rsp, %r10
movl %r14d, %edx
- leaq READERS_WAKEUP(%r12), %rdi
- movl $SYS_futex, %eax
- syscall
- movq %rax, %rdx
-17:
- /* Reget the lock. */
+ jmp 21b
+ .previous
+#endif
+
+17: /* Reget the lock. */
movl $1, %esi
xorl %eax, %eax
LOCK
@@ -157,17 +192,36 @@ pthread_rwlock_timedrdlock:
7: movq %rdx, %rax
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+#endif
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
retq
-1:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ cfi_adjust_cfa_offset(16)
+ cfi_rel_offset(%r12, 8)
+ cfi_rel_offset(%r13, 0)
+#else
+ cfi_adjust_cfa_offset(40)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
+ cfi_offset(%r14, -32)
+#endif
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -175,13 +229,13 @@ pthread_rwlock_timedrdlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -194,22 +248,22 @@ pthread_rwlock_timedrdlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
@@ -217,4 +271,5 @@ pthread_rwlock_timedrdlock:
19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedrdlock,.-pthread_rwlock_timedrdlock
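
pthread_rwlock_timedrdlock now prefers FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, handing the kernel the caller's absolute timeout directly, and only falls back to the old gettimeofday-plus-relative-FUTEX_WAIT path (.Lreltmo) when __have_futex_clock_realtime reports that the kernel lacks the operation. A C sketch of the same two-way wait, with the private-flag handling from the earlier sketches assumed to be folded into the opcode separately:

    #include <errno.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <sys/time.h>
    #include <time.h>
    #include <unistd.h>

    /* Stand-in for the __have_futex_clock_realtime runtime probe.  */
    static int have_futex_clock_realtime = 1;

    static long timed_futex_wait(int *addr, int val, const struct timespec *abstime)
    {
        if (have_futex_clock_realtime)
            /* The kernel treats the timeout as absolute CLOCK_REALTIME;
               FUTEX_BITSET_MATCH_ANY is the 0xffffffff mask in the assembly.  */
            return syscall(SYS_futex, addr,
                           FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, val,
                           abstime, NULL, FUTEX_BITSET_MATCH_ANY);

        /* Fallback (.Lreltmo): convert to a relative timeout first.  */
        struct timeval now;
        gettimeofday(&now, NULL);
        struct timespec rel = {
            .tv_sec  = abstime->tv_sec - now.tv_sec,
            .tv_nsec = abstime->tv_nsec - now.tv_usec * 1000,
        };
        if (rel.tv_nsec < 0) {
            rel.tv_nsec += 1000000000;
            rel.tv_sec--;
        }
        if (rel.tv_sec < 0) {                    /* time already up */
            errno = ETIMEDOUT;
            return -1;
        }
        return syscall(SYS_futex, addr, FUTEX_WAIT, val, &rel, NULL, 0);
    }
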
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
index b479da727..401bbc5d9 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_timedwrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,34 +18,39 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
+#include <bits/kernel-features.h>
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-
.text
.globl pthread_rwlock_timedwrlock
.type pthread_rwlock_timedwrlock,@function
.align 16
pthread_rwlock_timedwrlock:
+ cfi_startproc
pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
pushq %r13
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define VALREG %edx
+#else
pushq %r14
+ cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r14, 0)
+
subq $16, %rsp
+ cfi_adjust_cfa_offset(16)
+# define VALREG %r14d
+#endif
movq %rdi, %r12
movq %rsi, %r13
@@ -74,7 +79,7 @@ pthread_rwlock_timedwrlock:
incl WRITERS_QUEUED(%r12)
je 4f
- movl WRITERS_WAKEUP(%r12), %r14d
+ movl WRITERS_WAKEUP(%r12), VALREG
LOCK
#if MUTEX == 0
@@ -84,8 +89,33 @@ pthread_rwlock_timedwrlock:
#endif
jne 10f
+11:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ xorl PSHARED(%r12), %esi
+ movq %r13, %r10
+ movl $0xffffffff, %r9d
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ movl %r14d, %edx
+#endif
+21: leaq WRITERS_WAKEUP(%r12), %rdi
+ movl $SYS_futex, %eax
+ syscall
+ movq %rax, %rdx
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .subsection 2
+.Lreltmo:
/* Get current time. */
-11: movq %rsp, %rdi
+ movq %rsp, %rdi
xorl %esi, %esi
movq $VSYSCALL_ADDR_vgettimeofday, %rax
callq *%rax
@@ -108,20 +138,26 @@ pthread_rwlock_timedwrlock:
movq %rcx, (%rsp) /* Store relative timeout. */
movq %rdi, 8(%rsp)
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
-#else
+# ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%r12), %esi
+# else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%r12), %esi
+# else
movl $FUTEX_WAIT, %esi
-#endif
+ orl PSHARED(%r12), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
+# endif
movq %rsp, %r10
movl %r14d, %edx
- leaq WRITERS_WAKEUP(%r12), %rdi
- movl $SYS_futex, %eax
- syscall
- movq %rax, %rdx
-17:
- /* Reget the lock. */
+ jmp 21b
+ .previous
+#endif
+
+17: /* Reget the lock. */
movl $1, %esi
xorl %eax, %eax
LOCK
@@ -153,17 +189,36 @@ pthread_rwlock_timedwrlock:
7: movq %rdx, %rax
+#ifndef __ASSUME_PRIVATE_FUTEX
addq $16, %rsp
+ cfi_adjust_cfa_offset(-16)
popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r14)
+#endif
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(%r12)
retq
-1:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ cfi_adjust_cfa_offset(16)
+ cfi_rel_offset(%r12, 8)
+ cfi_rel_offset(%r13, 0)
+#else
+ cfi_adjust_cfa_offset(40)
+ cfi_offset(%r12, -16)
+ cfi_offset(%r13, -24)
+ cfi_offset(%r14, -32)
+#endif
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 2b
14: cmpl %fs:TID, %eax
@@ -171,13 +226,13 @@ pthread_rwlock_timedwrlock:
20: movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leal MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
/* Overflow. */
@@ -185,22 +240,22 @@ pthread_rwlock_timedwrlock:
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 11b
-12:
+12: movl PSHARED(%r12), %esi
#if MUTEX == 0
movq %r12, %rdi
#else
leaq MUTEX(%r12), %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
jmp 13b
16: movq $-ETIMEDOUT, %rdx
@@ -208,4 +263,5 @@ pthread_rwlock_timedwrlock:
19: movl $EINVAL, %edx
jmp 9b
+ cfi_endproc
.size pthread_rwlock_timedwrlock,.-pthread_rwlock_timedwrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
index a0f75226a..cfcc7a18c 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_unlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,17 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -37,6 +29,7 @@
.type __pthread_rwlock_unlock,@function
.align 16
__pthread_rwlock_unlock:
+ cfi_startproc
/* Get the lock. */
movl $1, %esi
xorl %eax, %eax
@@ -55,9 +48,8 @@ __pthread_rwlock_unlock:
5: movl $0, WRITER(%rdi)
- movl $1, %esi
+ movl $1, %edx
leaq WRITERS_WAKEUP(%rdi), %r10
- movq %rsi, %rdx
cmpl $0, WRITERS_QUEUED(%rdi)
jne 0f
@@ -77,7 +69,16 @@ __pthread_rwlock_unlock:
#endif
jne 7f
-8: movl $SYS_futex, %eax
+8:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAKE, %esi
+ xorl PSHARED(%rdi), %esi
+#else
+ movl $FUTEX_WAKE, %esi
+ orl PSHARED(%rdi), %esi
+ xorl %fs:PRIVATE_FUTEX, %esi
+#endif
+ movl $SYS_futex, %eax
movq %r10, %rdi
syscall
@@ -96,30 +97,30 @@ __pthread_rwlock_unlock:
4: xorl %eax, %eax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 2b
-3:
+3: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 4b
-7:
+7: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 8b
-
+ cfi_endproc
.size __pthread_rwlock_unlock,.-__pthread_rwlock_unlock
.globl pthread_rwlock_unlock
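
The unlock path keeps its original wake policy; the hunk above only reshuffles registers and adds the private-flag computation. On release of the write lock a single queued writer is preferred, otherwise the queued readers are all released (the reader branch lies outside the hunks shown here). A simplified sketch of that decision, with illustrative field names rather than the real pthread_rwlock_t layout; the callback stands in for the FUTEX_WAKE syscall:

    #include <limits.h>

    struct rwlock_sketch {
        int writer;           /* TID of the writer holding the lock, 0 if none */
        int writers_queued;   /* blocked writers                               */
        int readers_queued;   /* blocked readers                               */
        int writers_wakeup;   /* futex word writers sleep on                   */
        int readers_wakeup;   /* futex word readers sleep on                   */
    };

    static void rwlock_unlock_wake(struct rwlock_sketch *rw,
                                   void (*futex_wake)(int *addr, int nr))
    {
        rw->writer = 0;
        if (rw->writers_queued) {
            rw->writers_wakeup++;                  /* bump the word waiters check */
            futex_wake(&rw->writers_wakeup, 1);    /* wake exactly one writer     */
        } else if (rw->readers_queued) {
            rw->readers_wakeup++;
            futex_wake(&rw->readers_wakeup, INT_MAX);  /* wake every reader */
        }
    }
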
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
index 39b54dc6b..b7bc8522d 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_rwlock_wrlock.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,19 +18,10 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <lowlevelrwlock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
+#include <bits/kernel-features.h>
.text
@@ -39,6 +30,7 @@
.type __pthread_rwlock_wrlock,@function
.align 16
__pthread_rwlock_wrlock:
+ cfi_startproc
xorq %r10, %r10
/* Get the lock. */
@@ -71,12 +63,20 @@ __pthread_rwlock_wrlock:
#endif
jne 10f
-11: addq $WRITERS_WAKEUP, %rdi
-#if FUTEX_WAIT == 0
- xorl %esi, %esi
+11:
+#ifdef __ASSUME_PRIVATE_FUTEX
+ movl $FUTEX_PRIVATE_FLAG|FUTEX_WAIT, %esi
+ xorl PSHARED(%rdi), %esi
#else
+# if FUTEX_WAIT == 0
+ movl PSHARED(%rdi), %esi
+# else
movl $FUTEX_WAIT, %esi
+ orl PSHARED(%rdi), %esi
+# endif
+ xorl %fs:PRIVATE_FUTEX, %esi
#endif
+ addq $WRITERS_WAKEUP, %rdi
movl $SYS_futex, %eax
syscall
@@ -111,11 +111,11 @@ __pthread_rwlock_wrlock:
movq %rdx, %rax
retq
-1:
+1: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
@@ -126,36 +126,37 @@ __pthread_rwlock_wrlock:
movl $EDEADLK, %edx
jmp 9b
-6:
+6: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
jmp 7b
4: decl WRITERS_QUEUED(%rdi)
movl $EAGAIN, %edx
jmp 9b
-10:
+10: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_unlock_wake
+ callq __lll_unlock_wake
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 11b
-12:
+12: movl PSHARED(%rdi), %esi
#if MUTEX != 0
addq $MUTEX, %rdi
#endif
- callq __lll_mutex_lock_wait
+ callq __lll_lock_wait
#if MUTEX != 0
subq $MUTEX, %rdi
#endif
jmp 13b
+ cfi_endproc
.size __pthread_rwlock_wrlock,.-__pthread_rwlock_wrlock
.globl pthread_rwlock_wrlock
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
index 5c8a858ad..7af6524fe 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_post.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2008 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,15 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <pthread-errnos.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
-#define FUTEX_WAKE 1
+#include <structsem.h>
.text
@@ -35,30 +29,61 @@
.type sem_post,@function
.align 16
sem_post:
- movl $1, %edx
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+0: cmpl $SEM_VALUE_MAX, %eax
+ je 3f
+ leal 1(%rax), %esi
LOCK
- xaddl %edx, (%rdi)
+#if VALUE == 0
+ cmpxchgl %esi, (%rdi)
+#else
+ cmpxchgl %esi, VALUE(%rdi)
+#endif
+ jnz 0b
+
+ cmpq $0, NWAITERS(%rdi)
+ je 2f
movl $SYS_futex, %eax
movl $FUTEX_WAKE, %esi
- incl %edx
+ orl PRIVATE(%rdi), %esi
+ movl $1, %edx
syscall
testq %rax, %rax
js 1f
- xorl %eax, %eax
+2: xorl %eax, %eax
retq
1:
#if USE___THREAD
- movq errno@gottpoff(%rip), %rdx
- movl $EINVAL, %fs:(%rdx)
+ movl $EINVAL, %eax
+#else
+ callq __errno_location@plt
+ movl $EINVAL, %edx
+#endif
+ jmp 4f
+
+3:
+#if USE___THREAD
+ movl $EOVERFLOW, %eax
#else
callq __errno_location@plt
- movl $EINVAL, (%rax)
+ movl $EOVERFLOW, %edx
#endif
+4:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %eax, %fs:(%rdx)
+#else
+ movl %edx, (%rax)
+#endif
orl $-1, %eax
retq
.size sem_post,.-sem_post
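
sem_post moves from an unconditional xadd to a compare-and-swap loop that refuses to overflow past SEM_VALUE_MAX (reporting EOVERFLOW), and it only issues FUTEX_WAKE when the new NWAITERS count says someone is actually blocked. A C sketch of that logic, using an illustrative semaphore layout in place of the real structsem.h definitions:

    #include <errno.h>
    #include <limits.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct sem_sketch {
        unsigned int  value;
        int           private_flag;   /* 0 or FUTEX_PRIVATE_FLAG */
        unsigned long nwaiters;
    };

    static int sem_post_sketch(struct sem_sketch *s)
    {
        for (;;) {
            unsigned int v = s->value;
            if (v == SEM_VALUE_MAX) {              /* would overflow */
                errno = EOVERFLOW;
                return -1;
            }
            if (__sync_bool_compare_and_swap(&s->value, v, v + 1))
                break;
        }

        /* Only enter the kernel when a waiter is recorded.  */
        if (s->nwaiters > 0)
            syscall(SYS_futex, &s->value, FUTEX_WAKE | s->private_flag, 1,
                    NULL, NULL, 0);
        return 0;
    }
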
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
index 64e168099..f9af8ecc1 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,70 +18,183 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <bits/kernel-features.h>
+#include <lowlevellock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
+#include <structsem.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
/* For the calculation see asm/vsyscall.h. */
#define VSYSCALL_ADDR_vgettimeofday 0xffffffffff600000
-
.text
.globl sem_timedwait
.type sem_timedwait,@function
.align 16
- cfi_startproc
sem_timedwait:
- /* First check for cancellation. */
- movl %fs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 11f
-
+.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+#if VALUE == 0
movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
2: testl %eax, %eax
je 1f
leaq -1(%rax), %rdx
LOCK
+#if VALUE == 0
cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
jne 2b
xorl %eax, %eax
retq
/* Check whether the timeout value is valid. */
-1: pushq %r12
+1: cmpq $1000000000, 8(%rsi)
+ jae 6f
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+# ifdef PIC
+ cmpl $0, __have_futex_clock_realtime(%rip)
+# else
+ cmpl $0, __have_futex_clock_realtime
+# endif
+ je .Lreltmo
+#endif
+
+ /* This push is only needed to store the sem_t pointer for the
+ exception handler. */
+ pushq %rdi
+ cfi_adjust_cfa_offset(8)
+
+ movq %rsi, %r10
+
+ LOCK
+ addq $1, NWAITERS(%rdi)
+
+.LcleanupSTART:
+13: call __pthread_enable_asynccancel
+ movl %eax, %r8d
+
+#if VALUE != 0
+ leaq VALUE(%rdi), %rdi
+#endif
+ movl $0xffffffff, %r9d
+ movl $FUTEX_WAIT_BITSET|FUTEX_CLOCK_REALTIME, %esi
+ orl PRIVATE(%rdi), %esi
+ movl $SYS_futex, %eax
+ xorl %edx, %edx
+ syscall
+ movq %rax, %r9
+#if VALUE != 0
+ leaq -VALUE(%rdi), %rdi
+#endif
+
+ xchgq %r8, %rdi
+ call __pthread_disable_asynccancel
+.LcleanupEND:
+ movq %r8, %rdi
+
+ testq %r9, %r9
+ je 11f
+ cmpq $-EWOULDBLOCK, %r9
+ jne 3f
+
+11:
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+14: testl %eax, %eax
+ je 13b
+
+ leaq -1(%rax), %rcx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %ecx, (%rdi)
+#else
+ cmpxchgl %ecx, VALUE(%rdi)
+#endif
+ jne 14b
+
+ xorl %eax, %eax
+
+15: LOCK
+ subq $1, NWAITERS(%rdi)
+
+ leaq 8(%rsp), %rsp
+ cfi_adjust_cfa_offset(-8)
+ retq
+
+ cfi_adjust_cfa_offset(8)
+3: negq %r9
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl %r9d, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl %r9d, (%rax)
+#endif
+
+ orl $-1, %eax
+ jmp 15b
+
+ cfi_adjust_cfa_offset(-8)
+6:
+#if USE___THREAD
+ movq errno@gottpoff(%rip), %rdx
+ movl $EINVAL, %fs:(%rdx)
+#else
+ callq __errno_location@plt
+ movl $EINVAL, (%rax)
+#endif
+
+ orl $-1, %eax
+
+ retq
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+ pushq %r12
cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r12, 0)
pushq %r13
cfi_adjust_cfa_offset(8)
+ cfi_rel_offset(%r13, 0)
pushq %r14
cfi_adjust_cfa_offset(8)
- subq $24, %rsp
- cfi_adjust_cfa_offset(24)
+ cfi_rel_offset(%r14, 0)
+
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define STACKFRAME 8
+#else
+# define STACKFRAME 24
+#endif
+ subq $STACKFRAME, %rsp
+ cfi_adjust_cfa_offset(STACKFRAME)
movq %rdi, %r12
- cfi_offset(12, -16) /* %r12 */
movq %rsi, %r13
- cfi_offset(13, -24) /* %r13 */
- /* Check for invalid nanosecond field. */
- cmpq $1000000000, 8(%r13)
- movl $EINVAL, %r14d
- cfi_offset(14, -24) /* %r14 */
- jae 6f
-
-7: call __pthread_enable_asynccancel
- movl %eax, 16(%rsp)
+ LOCK
+ addq $1, NWAITERS(%r12)
- xorl %esi, %esi
+7: xorl %esi, %esi
movq %rsp, %rdi
movq $VSYSCALL_ADDR_vgettimeofday, %rax
callq *%rax
@@ -99,14 +212,27 @@ sem_timedwait:
decq %rdi
5: testq %rdi, %rdi
movl $ETIMEDOUT, %r14d
- js 6f /* Time is already up. */
+ js 36f /* Time is already up. */
movq %rdi, (%rsp) /* Store relative timeout. */
movq %rsi, 8(%rsp)
+.LcleanupSTART2:
+ call __pthread_enable_asynccancel
+ movl %eax, 16(%rsp)
+
movq %rsp, %r10
+# if VALUE == 0
movq %r12, %rdi
- xorl %esi, %esi
+# else
+ leaq VALUE(%r12), %rdi
+# endif
+# if FUTEX_WAIT == 0
+ movl PRIVATE(%rdi), %esi
+# else
+ movl $FUTEX_WAIT, %esi
+ orl PRIVATE(%rdi), %esi
+# endif
movl $SYS_futex, %eax
xorl %edx, %edx
syscall
@@ -114,41 +240,55 @@ sem_timedwait:
movl 16(%rsp), %edi
call __pthread_disable_asynccancel
+.LcleanupEND2:
testq %r14, %r14
je 9f
cmpq $-EWOULDBLOCK, %r14
- jne 3f
+ jne 33f
-9: movl (%r12), %eax
+9:
+# if VALUE == 0
+ movl (%r12), %eax
+# else
+ movl VALUE(%r12), %eax
+# endif
8: testl %eax, %eax
je 7b
leaq -1(%rax), %rcx
LOCK
+# if VALUE == 0
cmpxchgl %ecx, (%r12)
+# else
+ cmpxchgl %ecx, VALUE(%r12)
+# endif
jne 8b
xorl %eax, %eax
-10: addq $24, %rsp
- cfi_adjust_cfa_offset(-24)
+
+45: LOCK
+ subq $1, NWAITERS(%r12)
+
+ addq $STACKFRAME, %rsp
+ cfi_adjust_cfa_offset(-STACKFRAME)
popq %r14
cfi_adjust_cfa_offset(-8)
- cfi_restore(14)
+ cfi_restore(%r14)
popq %r13
cfi_adjust_cfa_offset(-8)
- cfi_restore(13)
+ cfi_restore(%r13)
popq %r12
cfi_adjust_cfa_offset(-8)
- cfi_restore(12)
+ cfi_restore(%r12)
retq
- cfi_adjust_cfa_offset(48)
- cfi_offset(12, -16) /* %r12 */
- cfi_offset(13, -24) /* %r13 */
- cfi_offset(14, -32) /* %r14 */
-3: negq %r14
-6:
+ cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+ cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+ cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+ cfi_rel_offset(%r14, STACKFRAME)
+33: negq %r14
+36:
#if USE___THREAD
movq errno@gottpoff(%rip), %rdx
movl %r14d, %fs:(%rdx)
@@ -158,17 +298,90 @@ sem_timedwait:
#endif
orl $-1, %eax
- jmp 10b
- cfi_adjust_cfa_offset(-48)
- cfi_restore(14)
- cfi_restore(13)
- cfi_restore(12)
-
-11: /* Canceled. */
- movq $0xffffffffffffffff, %fs:RESULT
- LOCK
- orl $0x10, %fs:CANCELHANDLING
- movq %fs:CLEANUP_JMP_BUF, %rdi
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
+ jmp 45b
+#endif
cfi_endproc
.size sem_timedwait,.-sem_timedwait
+
+
+ .type sem_timedwait_cleanup,@function
+sem_timedwait_cleanup:
+ cfi_startproc
+ cfi_adjust_cfa_offset(8)
+
+ movq (%rsp), %rdi
+ LOCK
+ subq $1, NWAITERS(%rdi)
+ movq %rax, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
+ cfi_endproc
+ .size sem_timedwait_cleanup,.-sem_timedwait_cleanup
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .type sem_timedwait_cleanup2,@function
+sem_timedwait_cleanup2:
+ cfi_startproc
+ cfi_adjust_cfa_offset(STACKFRAME + 3 * 8)
+ cfi_rel_offset(%r12, STACKFRAME + 2 * 8)
+ cfi_rel_offset(%r13, STACKFRAME + 1 * 8)
+ cfi_rel_offset(%r14, STACKFRAME)
+
+ LOCK
+ subq $1, NWAITERS(%r12)
+ movq %rax, %rdi
+ movq STACKFRAME(%rsp), %r14
+ movq STACKFRAME+8(%rsp), %r13
+ movq STACKFRAME+16(%rsp), %r12
+.LcallUR2:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE2:
+ cfi_endproc
+ .size sem_timedwait_cleanup2,.-sem_timedwait_cleanup2
+#endif
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_timedwait_cleanup-.LSTARTCODE
+ .uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .uleb128 .LcleanupSTART2-.LSTARTCODE
+ .uleb128 .LcleanupEND2-.LcleanupSTART2
+ .uleb128 sem_timedwait_cleanup2-.LSTARTCODE
+ .uleb128 0
+#endif
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+ .uleb128 .LcallUR2-.LSTARTCODE
+ .uleb128 .LENDCODE2-.LcallUR2
+ .uleb128 0
+ .uleb128 0
+#endif
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
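
The sem_timedwait rewrite above (and the sem_wait rewrite that follows) replaces the explicit cancellation check at entry with unwind-table cleanup handlers, and brackets the sleep with an NWAITERS increment and decrement so that sem_post can skip FUTEX_WAKE when nobody waits; the sem_timedwait_cleanup routines exist so a cancelled waiter still drops its NWAITERS count. A C sketch of the waiting loop itself, omitting cancellation and the absolute-timeout variant already sketched for the rwlock code:

    #include <errno.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct sem_sketch {
        unsigned int  value;
        int           private_flag;   /* 0 or FUTEX_PRIVATE_FLAG */
        unsigned long nwaiters;
    };

    /* One CAS attempt loop over a positive value; the shared fast path.  */
    static int sem_trydec(struct sem_sketch *s)
    {
        unsigned int v;
        while ((v = s->value) > 0)
            if (__sync_bool_compare_and_swap(&s->value, v, v - 1))
                return 0;
        return -1;
    }

    static int sem_wait_sketch(struct sem_sketch *s)
    {
        int result = 0;

        if (sem_trydec(s) == 0)
            return 0;

        /* Slow path: record ourselves so sem_post knows to wake, then
           sleep until the value becomes nonzero.  */
        __sync_fetch_and_add(&s->nwaiters, 1);
        while (sem_trydec(s) != 0) {
            long r = syscall(SYS_futex, &s->value,
                             FUTEX_WAIT | s->private_flag, 0, NULL, NULL, 0);
            if (r != 0 && errno != EWOULDBLOCK) {
                result = -1;                     /* e.g. EINTR */
                break;
            }
        }
        __sync_fetch_and_sub(&s->nwaiters, 1);
        return result;
    }
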
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
index 08edc390c..7b7f63ddb 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_trywait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,14 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <pthread-errnos.h>
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
-
.text
.globl sem_trywait
@@ -36,7 +31,7 @@ sem_trywait:
2: testl %eax, %eax
jz 1f
- leaq -1(%rax), %rdx
+ leal -1(%rax), %edx
LOCK
cmpxchgl %edx, (%rdi)
jne 2b
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
index c2f94d47f..73d1d1633 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
@@ -18,15 +18,9 @@
02111-1307 USA. */
#include <sysdep.h>
+#include <lowlevellock.h>
#include <pthread-errnos.h>
-#include <tcb-offsets.h>
-#include <tls.h>
-
-#ifndef UP
-# define LOCK lock
-#else
-# define
-#endif
+#include <structsem.h>
.text
@@ -34,86 +28,155 @@
.globl sem_wait
.type sem_wait,@function
.align 16
- cfi_startproc
sem_wait:
- /* First check for cancellation. */
- movl %fs:CANCELHANDLING, %eax
- andl $0xfffffff9, %eax
- cmpl $8, %eax
- je 4f
-
- pushq %r12
- cfi_adjust_cfa_offset(8)
- cfi_offset(12, -16)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- movq %rdi, %r13
- cfi_offset(13, -24)
+.LSTARTCODE:
+ cfi_startproc
+#ifdef SHARED
+ cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+ DW.ref.__gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+ cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+ cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
-3: movl (%r13), %eax
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
2: testl %eax, %eax
je 1f
- leaq -1(%rax), %rdx
+ leal -1(%rax), %edx
LOCK
- cmpxchgl %edx, (%r13)
+#if VALUE == 0
+ cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
jne 2b
+
xorl %eax, %eax
+ retq
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(12)
+ /* This push is only needed to store the sem_t pointer for the
+ exception handler. */
+1: pushq %rdi
+ cfi_adjust_cfa_offset(8)
- retq
+ LOCK
+ addq $1, NWAITERS(%rdi)
- cfi_adjust_cfa_offset(16)
- cfi_offset(12, -16)
- cfi_offset(13, -24)
-1: call __pthread_enable_asynccancel
+.LcleanupSTART:
+6: call __pthread_enable_asynccancel
movl %eax, %r8d
xorq %r10, %r10
movl $SYS_futex, %eax
- movq %r13, %rdi
- movq %r10, %rsi
- movq %r10, %rdx
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%rdi), %esi
+#else
+ movl $FUTEX_WAIT, %esi
+ orl PRIVATE(%rdi), %esi
+#endif
+ xorl %edx, %edx
syscall
- movq %rax, %r12
+ movq %rax, %rcx
- movl %r8d, %edi
+ xchgq %r8, %rdi
call __pthread_disable_asynccancel
+.LcleanupEND:
+ movq %r8, %rdi
- testq %r12, %r12
- je 3b
- cmpq $-EWOULDBLOCK, %r12
- je 3b
- negq %r12
+ testq %rcx, %rcx
+ je 3f
+ cmpq $-EWOULDBLOCK, %rcx
+ jne 4f
+
+3:
+#if VALUE == 0
+ movl (%rdi), %eax
+#else
+ movl VALUE(%rdi), %eax
+#endif
+5: testl %eax, %eax
+ je 6b
+
+ leal -1(%rax), %edx
+ LOCK
+#if VALUE == 0
+ cmpxchgl %edx, (%rdi)
+#else
+ cmpxchgl %edx, VALUE(%rdi)
+#endif
+ jne 5b
+
+ xorl %eax, %eax
+
+9: LOCK
+ subq $1, NWAITERS(%rdi)
+
+ leaq 8(%rsp), %rsp
+ cfi_adjust_cfa_offset(-8)
+
+ retq
+
+ cfi_adjust_cfa_offset(8)
+4: negq %rcx
#if USE___THREAD
movq errno@gottpoff(%rip), %rdx
- movl %r12d, %fs:(%rdx)
+ movl %ecx, %fs:(%rdx)
#else
+# error "not supported. %rcx and %rdi must be preserved"
callq __errno_location@plt
- movl %r12d, (%rax)
+ movl %ecx, (%rax)
#endif
orl $-1, %eax
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(12)
+ jmp 9b
+ .size sem_wait,.-sem_wait
- retq
-4: /* Canceled. */
- movq $0xffffffffffffffff, %fs:RESULT
+ .type sem_wait_cleanup,@function
+sem_wait_cleanup:
+ movq (%rsp), %rdi
LOCK
- orl $0x10, %fs:CANCELHANDLING
- movq %fs:CLEANUP_JMP_BUF, %rdi
- jmp HIDDEN_JUMPTARGET (__pthread_unwind)
+ subq $1, NWAITERS(%rdi)
+ movq %rax, %rdi
+.LcallUR:
+ call _Unwind_Resume@PLT
+ hlt
+.LENDCODE:
cfi_endproc
- .size sem_wait,.-sem_wait
+ .size sem_wait_cleanup,.-sem_wait_cleanup
+
+
+ .section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+ .byte DW_EH_PE_omit # @LPStart format
+ .byte DW_EH_PE_omit # @TType format
+ .byte DW_EH_PE_uleb128 # call-site format
+ .uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+ .uleb128 .LcleanupSTART-.LSTARTCODE
+ .uleb128 .LcleanupEND-.LcleanupSTART
+ .uleb128 sem_wait_cleanup-.LSTARTCODE
+ .uleb128 0
+ .uleb128 .LcallUR-.LSTARTCODE
+ .uleb128 .LENDCODE-.LcallUR
+ .uleb128 0
+ .uleb128 0
+.Lcstend:
+
+
+#ifdef SHARED
+ .hidden DW.ref.__gcc_personality_v0
+ .weak DW.ref.__gcc_personality_v0
+ .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+ .align 8
+ .type DW.ref.__gcc_personality_v0, @object
+ .size DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+ .quad __gcc_personality_v0
+#endif
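
The rewritten sem_wait drops the old up-front cancellation test and the %r12/%r13 juggling; the sem_t pointer is kept in a stack slot purely so sem_wait_cleanup can find it during a cancellation unwind, and the futex operation is OR-ed with the per-semaphore PRIVATE word. A compilable sketch of the same algorithm, again assuming the illustrative struct new_sem layout (value/private/nwaiters) and glossing over the enable/disable-asynccancel bracketing around the syscall:

    #include <errno.h>
    #include <pthread.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Illustrative layout only; the real definition lives in structsem.h.  */
    struct new_sem { unsigned int value; int private; unsigned long nwaiters; };

    static void
    unregister_waiter (void *arg)            /* mirrors sem_wait_cleanup */
    {
      __sync_fetch_and_sub (&((struct new_sem *) arg)->nwaiters, 1);
    }

    /* Try to take one token; returns 1 on success, 0 if the count is zero.  */
    static int
    try_take (struct new_sem *isem)
    {
      unsigned int val = isem->value;
      while (val != 0)
        {
          unsigned int seen
            = __sync_val_compare_and_swap (&isem->value, val, val - 1);
          if (seen == val)
            return 1;
          val = seen;
        }
      return 0;
    }

    static int
    wait_sketch (struct new_sem *isem)
    {
      int result = 0;

      if (try_take (isem))                   /* fast path, no waiter registration */
        return 0;

      __sync_fetch_and_add (&isem->nwaiters, 1);
      pthread_cleanup_push (unregister_waiter, isem);
      while (!try_take (isem))
        {
          /* Cancellation is enabled only around the futex wait, matching the
             .LcleanupSTART/.LcleanupEND region in the assembly.  */
          long err = syscall (SYS_futex, &isem->value,
                              FUTEX_WAIT | isem->private, 0, NULL);
          if (err != 0 && errno != EWOULDBLOCK)
            {
              result = -1;                   /* errno already set by the syscall */
              break;
            }
        }
      pthread_cleanup_pop (0);
      __sync_fetch_and_sub (&isem->nwaiters, 1);
      return result;
    }
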
diff --git a/libpthread/nptl/sysdeps/x86_64/Makefile b/libpthread/nptl/sysdeps/x86_64/Makefile
deleted file mode 100644
index 2f0d88f30..000000000
--- a/libpthread/nptl/sysdeps/x86_64/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (C) 2002, 2003 Free Software Foundation, Inc.
-# This file is part of the GNU C Library.
-
-# The GNU C Library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-
-# The GNU C Library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-
-# You should have received a copy of the GNU Lesser General Public
-# License along with the GNU C Library; if not, write to the Free
-# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-# 02111-1307 USA.
-
-ifeq ($(subdir),csu)
-gen-as-const-headers += tcb-offsets.sym
-endif
-
-ifeq ($(subdir),nptl)
-CFLAGS-pthread_create.c += -mpreferred-stack-boundary=4
-CFLAGS-tst-align.c += -mpreferred-stack-boundary=4
-CFLAGS-tst-align2.c += -mpreferred-stack-boundary=4
-endif
diff --git a/libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym b/libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym
index 8118d2df8..cf863752e 100644
--- a/libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym
+++ b/libpthread/nptl/sysdeps/x86_64/tcb-offsets.sym
@@ -10,3 +10,19 @@ CLEANUP offsetof (struct pthread, cleanup)
CLEANUP_PREV offsetof (struct _pthread_cleanup_buffer, __prev)
MUTEX_FUTEX offsetof (pthread_mutex_t, __data.__lock)
MULTIPLE_THREADS_OFFSET offsetof (tcbhead_t, multiple_threads)
+POINTER_GUARD offsetof (tcbhead_t, pointer_guard)
+VGETCPU_CACHE_OFFSET offsetof (tcbhead_t, vgetcpu_cache)
+#ifndef __ASSUME_PRIVATE_FUTEX
+PRIVATE_FUTEX offsetof (tcbhead_t, private_futex)
+#endif
+RTLD_SAVESPACE_SSE offsetof (tcbhead_t, rtld_savespace_sse)
+
+-- Not strictly offsets, but these values are also used in the TCB.
+TCB_CANCELSTATE_BITMASK CANCELSTATE_BITMASK
+TCB_CANCELTYPE_BITMASK CANCELTYPE_BITMASK
+TCB_CANCELING_BITMASK CANCELING_BITMASK
+TCB_CANCELED_BITMASK CANCELED_BITMASK
+TCB_EXITING_BITMASK EXITING_BITMASK
+TCB_CANCEL_RESTMASK CANCEL_RESTMASK
+TCB_TERMINATED_BITMASK TERMINATED_BITMASK
+TCB_PTHREAD_CANCELED PTHREAD_CANCELED
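
The new entries follow the conventions already used in this .sym file: each `NAME expression` line goes through the gen-as-const machinery (see the gen-as-const-headers line in the deleted x86_64 Makefile) and ends up as an assembler-visible constant in the generated tcb-offsets.h, and lines starting with `--` are comments. The TCB_* aliases expose the cancellation bitmasks so assembly code can test the cancel state with named masks instead of literal values. A small stand-alone illustration of the kind of offsetof computation these lines describe (the struct below is a stub, not the real tcbhead_t, so the printed offset is for demonstration only):

    #include <stddef.h>
    #include <stdio.h>

    /* Stub with the same flavour of fields; the offsets will not match the
       real tcbhead_t.  This only shows what an expression such as
       "offsetof (tcbhead_t, pointer_guard)" in the .sym file evaluates to
       at build time.  */
    typedef struct
    {
      void *tcb;
      void *dtv;
      void *self;
      int multiple_threads;
      unsigned long pointer_guard;
    } tcbhead_stub;

    int
    main (void)
    {
      printf ("POINTER_GUARD = %zu\n",
              offsetof (tcbhead_stub, pointer_guard));
      return 0;
    }
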
diff --git a/libpthread/nptl/sysdeps/x86_64/tls.h b/libpthread/nptl/sysdeps/x86_64/tls.h
index 7b40e3d57..396ad4213 100644
--- a/libpthread/nptl/sysdeps/x86_64/tls.h
+++ b/libpthread/nptl/sysdeps/x86_64/tls.h
@@ -27,6 +27,7 @@
# include <stdint.h>
# include <stdlib.h>
# include <sysdep.h>
+# include <bits/kernel-features.h>
# include <bits/wordsize.h>
# include <xmmintrin.h>
@@ -85,7 +86,7 @@ typedef struct
#define HAVE_TLS_MODEL_ATTRIBUTE 1
/* Signal that TLS support is available. */
-#define USE_TLS 1
+#define USE_TLS 1
/* Alignment requirement for the stack. */
#define STACK_ALIGN 16