Diffstat (limited to 'main/musl/0004-fix-missing-synchronization-in-atomic-store-on-i386-.patch')
-rw-r--r--  main/musl/0004-fix-missing-synchronization-in-atomic-store-on-i386-.patch  68
1 file changed, 68 insertions, 0 deletions
diff --git a/main/musl/0004-fix-missing-synchronization-in-atomic-store-on-i386-.patch b/main/musl/0004-fix-missing-synchronization-in-atomic-store-on-i386-.patch
new file mode 100644
index 0000000000..f3195ff39e
--- /dev/null
+++ b/main/musl/0004-fix-missing-synchronization-in-atomic-store-on-i386-.patch
@@ -0,0 +1,68 @@
+From 3c43c0761e1725fd5f89a9c028cbf43250abb913 Mon Sep 17 00:00:00 2001
+From: Rich Felker <dalias@aerifal.cx>
+Date: Tue, 28 Jul 2015 18:40:18 +0000
+Subject: [PATCH] fix missing synchronization in atomic store on i386 and
+ x86_64
+
+despite being strongly ordered, the x86 memory model does not preclude
+reordering of loads across earlier stores. while a plain store
+suffices as a release barrier, we actually need a full barrier, since
+users of a_store subsequently load a waiter count to determine whether
+to issue a futex wait, and using a stale count will result in soft
+(fail-to-wake) deadlocks. these deadlocks were observed in malloc and
+possible with stdio locks and other libc-internal locking.
+
+on i386, an atomic operation on the caller's stack is used as the
+barrier rather than performing the store itself using xchg; this
+avoids the need to read the cache line on which the store is being
+performed. mfence is used on x86_64 where it's always available, and
+could be used on i386 with the appropriate cpu model checks if it's
+shown to perform better.
+---
+ arch/i386/atomic.h   | 2 +-
+ arch/x32/atomic.h    | 2 +-
+ arch/x86_64/atomic.h | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
+index 95fecbd..25441df 100644
+--- a/arch/i386/atomic.h
++++ b/arch/i386/atomic.h
+@@ -88,7 +88,7 @@ static inline void a_dec(volatile int *x)
+ 
+ static inline void a_store(volatile int *p, int x)
+ {
+-	__asm__( "movl %1, %0" : "=m"(*p) : "r"(x) : "memory" );
++	__asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
+ }
+ 
+ static inline void a_spin()
+diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
+index b2014cc..2ab1f7a 100644
+--- a/arch/x32/atomic.h
++++ b/arch/x32/atomic.h
+@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
+ 
+ static inline void a_store(volatile int *p, int x)
+ {
+-	__asm__( "mov %1, %0" : "=m"(*p) : "r"(x) : "memory" );
++	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
+ }
+ 
+ static inline void a_spin()
+diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
+index b2014cc..2ab1f7a 100644
+--- a/arch/x86_64/atomic.h
++++ b/arch/x86_64/atomic.h
+@@ -83,7 +83,7 @@ static inline void a_dec(volatile int *x)
+ 
+ static inline void a_store(volatile int *p, int x)
+ {
+-	__asm__( "mov %1, %0" : "=m"(*p) : "r"(x) : "memory" );
++	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
+ }
+ 
+ static inline void a_spin()
+-- 
+2.4.6
+
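
The first paragraph of the commit message describes the caller-side pattern that makes a release-only store insufficient: a_store publishes the unlocked state, then the caller immediately loads a waiter count to decide whether a futex wake is needed. Below is a minimal sketch of that pattern, assuming Linux on x86_64 with GCC or Clang; struct lk, lk_unlock, and futex_wake are illustrative names for this sketch, not musl's actual internals.

/* Sketch of the store-then-load-waiters pattern the commit message
 * describes; assumes Linux/x86_64 and GCC/Clang inline asm. The names
 * (struct lk, lk_unlock, futex_wake) are illustrative, not musl's own. */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static inline void a_store(volatile int *p, int x)
{
	/* the patched x86_64 form: plain store, then a full barrier so the
	 * waiters load below cannot be satisfied before the store is
	 * globally visible */
	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
}

static void futex_wake(volatile int *addr, int cnt)
{
	syscall(SYS_futex, (void *)addr, FUTEX_WAKE, cnt, 0, 0, 0);
}

struct lk { volatile int lock; volatile int waiters; };

static void lk_unlock(struct lk *l)
{
	a_store(&l->lock, 0);
	/* with only a release store, this load could be satisfied with a
	 * stale waiters value observed before the store, skipping the wake
	 * and leaving a sleeper blocked: the soft deadlock the patch fixes */
	if (l->waiters) futex_wake(&l->lock, 1);
}

int main(void)
{
	struct lk l = { 1, 0 };
	lk_unlock(&l);	/* no waiters recorded, so no futex syscall here */
	return 0;
}

If the mov were not followed by a barrier, the unlocking thread could read a waiter count from before a concurrent waiter incremented it, and the wake would be skipped even though that waiter is about to sleep.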
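
The second paragraph of the commit message weighs three ways to obtain that full barrier. The sketch below lines them up for comparison, again assuming GCC or Clang inline asm; it is not musl source, and the lock orl variant only assembles on 32-bit targets, hence the preprocessor guard.

/* Three full-barrier store variants discussed in the commit message;
 * a comparison sketch assuming GCC/Clang inline asm, not musl source. */
static inline void store_xchg(volatile int *p, int x)
{
	/* xchg with a memory operand is implicitly locked: store and
	 * barrier in one instruction, but the CPU must read the target
	 * cache line as well as write it */
	__asm__( "xchg %1, %0" : "=m"(*p), "+r"(x) : : "memory" );
}

#ifdef __i386__
static inline void store_lock_or(volatile int *p, int x)
{
	/* plain store, then a locked no-op RMW on the caller's own stack
	 * line: a full barrier without re-reading the line *p lives on */
	__asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)"
	         : "=m"(*p) : "r"(x) : "memory" );
}
#endif

static inline void store_mfence(volatile int *p, int x)
{
	/* plain store followed by mfence; always available on x86_64, but
	 * on i386 only on SSE2-capable cpu models */
	__asm__( "mov %1, %0 ; mfence" : "=m"(*p) : "r"(x) : "memory" );
}

The patch picks the stack-based locked RMW on i386 because it is valid on every cpu model and avoids reading the cache line the store targets, while x86_64 can rely on mfence unconditionally.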