Diffstat (limited to 'libpthread/nptl/sysdeps')
28 files changed, 158 insertions, 158 deletions
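All 28 files receive the same mechanical rename: the GNU spellings asm, __asm, volatile and __volatile become the implementation-reserved forms __asm__ and __volatile__. A minimal sketch of why the spelling matters (my illustration, assuming an x86 target and GCC; not code from this tree): under gcc -std=c99 or -ansi the plain asm keyword is disabled, while the double-underscore alternate keywords are accepted in every mode, which is what library headers like these have to rely on.

/* Compile with `gcc -std=gnu99 -c': both functions build.
   Compile with `gcc -std=c99 -c': only pause_iso builds, because plain
   `asm' is a GNU extension rather than an ISO C keyword.  */

static inline void
pause_gnu (void)
{
  asm volatile ("pause" ::: "memory");          /* GNU-only spelling */
}

static inline void
pause_iso (void)
{
  __asm__ __volatile__ ("pause" ::: "memory");  /* reserved-namespace spelling */
}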
diff --git a/libpthread/nptl/sysdeps/i386/i686/tls.h b/libpthread/nptl/sysdeps/i386/i686/tls.h
index 4025ed8d2..928d269dc 100644
--- a/libpthread/nptl/sysdeps/i386/i686/tls.h
+++ b/libpthread/nptl/sysdeps/i386/i686/tls.h
@@ -25,9 +25,9 @@
 /* Macros to load from and store into segment registers.  We can use
    the 32-bit instructions.  */
 #define TLS_GET_GS() \
-  ({ int __seg; __asm ("movl %%gs, %0" : "=q" (__seg)); __seg; })
+  ({ int __seg; __asm__ ("movl %%gs, %0" : "=q" (__seg)); __seg; })
 #define TLS_SET_GS(val) \
-  __asm ("movl %0, %%gs" :: "q" (val))
+  __asm__ ("movl %0, %%gs" :: "q" (val))
 
 /* Get the full set of definitions.  */
diff --git a/libpthread/nptl/sysdeps/i386/pthreaddef.h b/libpthread/nptl/sysdeps/i386/pthreaddef.h
index 81456a4fc..f9f5645dc 100644
--- a/libpthread/nptl/sysdeps/i386/pthreaddef.h
+++ b/libpthread/nptl/sysdeps/i386/pthreaddef.h
@@ -41,8 +41,8 @@
 #define __exit_thread_inline(val) \
   while (1) {								      \
     if (__builtin_constant_p (val) && (val) == 0)			      \
-      __asm__ volatile ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit));   \
+      __asm__ __volatile__ ("xorl %%ebx, %%ebx; int $0x80" :: "a" (__NR_exit)); \
     else								      \
-      __asm__ volatile ("movl %1, %%ebx; int $0x80"			      \
+      __asm__ __volatile__ ("movl %1, %%ebx; int $0x80"			      \
 		    :: "a" (__NR_exit), "r" (val));			      \
   }
diff --git a/libpthread/nptl/sysdeps/i386/tls.h b/libpthread/nptl/sysdeps/i386/tls.h
index 5f27d8fec..ac547953a 100644
--- a/libpthread/nptl/sysdeps/i386/tls.h
+++ b/libpthread/nptl/sysdeps/i386/tls.h
@@ -232,7 +232,7 @@ union user_desc_init
      _segdescr.vals[3] = 0x51;						      \
 									      \
      /* Install the TLS.  */						      \
-     __asm__ volatile (TLS_LOAD_EBX					      \
+     __asm__ __volatile__ (TLS_LOAD_EBX					      \
 		   "int $0x80\n\t"					      \
 		   TLS_LOAD_EBX						      \
 		   : "=a" (_result), "=m" (_segdescr.desc.entry_number)	      \
@@ -262,7 +262,7 @@ union user_desc_init
 
 /* Return the thread descriptor for the current thread.
-   The contained asm must *not* be marked volatile since otherwise
+   The contained asm must *not* be marked __volatile__ since otherwise
    assignments like
 	pthread_descr self = thread_self();
    do not get optimized away.  */
@@ -282,11 +282,11 @@ union user_desc_init
 # define THREAD_GETMEM(descr, member) \
   ({ __typeof (descr->member) __value;					      \
     if (sizeof (__value) == 1)						      \
-       __asm__ volatile ("movb %%gs:%P2,%b0"				      \
+       __asm__ __volatile__ ("movb %%gs:%P2,%b0"			      \
 		     : "=q" (__value)					      \
 		     : "0" (0), "i" (offsetof (struct pthread, member)));     \
     else if (sizeof (__value) == 4)					      \
-       __asm__ volatile ("movl %%gs:%P1,%0"				      \
+       __asm__ __volatile__ ("movl %%gs:%P1,%0"				      \
 		     : "=r" (__value)					      \
 		     : "i" (offsetof (struct pthread, member)));	      \
     else								      \
@@ -296,7 +296,7 @@ union user_desc_init
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movl %%gs:%P1,%%eax\n\t"			      \
+	 __asm__ __volatile__ ("movl %%gs:%P1,%%eax\n\t"		      \
 		       "movl %%gs:%P2,%%edx"				      \
 		       : "=A" (__value)					      \
 		       : "i" (offsetof (struct pthread, member)),	      \
@@ -309,12 +309,12 @@ union user_desc_init
 # define THREAD_GETMEM_NC(descr, member, idx) \
   ({ __typeof (descr->member[0]) __value;				      \
     if (sizeof (__value) == 1)						      \
-       __asm__ volatile ("movb %%gs:%P2(%3),%b0"			      \
+       __asm__ __volatile__ ("movb %%gs:%P2(%3),%b0"			      \
 		     : "=q" (__value)					      \
 		     : "0" (0), "i" (offsetof (struct pthread, member[0])),   \
 		     "r" (idx));					      \
     else if (sizeof (__value) == 4)					      \
-       __asm__ volatile ("movl %%gs:%P1(,%2,4),%0"			      \
+       __asm__ __volatile__ ("movl %%gs:%P1(,%2,4),%0"			      \
 		     : "=r" (__value)					      \
 		     : "i" (offsetof (struct pthread, member[0])),	      \
 		       "r" (idx));					      \
@@ -325,7 +325,7 @@ union user_desc_init
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile  ("movl %%gs:%P1(,%2,8),%%eax\n\t"		      \
+	 __asm__ __volatile__  ("movl %%gs:%P1(,%2,8),%%eax\n\t"	      \
 			"movl %%gs:4+%P1(,%2,8),%%edx"			      \
 			: "=&A" (__value)				      \
 			: "i" (offsetof (struct pthread, member[0])),	      \
@@ -337,11 +337,11 @@ union user_desc_init
 
 /* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
 # define THREAD_SETMEM(descr, member, value) \
   ({ if (sizeof (descr->member) == 1)					      \
-       __asm__ volatile ("movb %b0,%%gs:%P1" :				      \
+       __asm__ __volatile__ ("movb %b0,%%gs:%P1" :			      \
 		     : "iq" (value),					      \
 		       "i" (offsetof (struct pthread, member)));	      \
     else if (sizeof (descr->member) == 4)				      \
-       __asm__ volatile ("movl %0,%%gs:%P1" :				      \
+       __asm__ __volatile__ ("movl %0,%%gs:%P1" :			      \
 		     : "ir" (value),					      \
 		       "i" (offsetof (struct pthread, member)));	      \
     else								      \
@@ -351,7 +351,7 @@ union user_desc_init
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movl %%eax,%%gs:%P1\n\t"			      \
+	 __asm__ __volatile__ ("movl %%eax,%%gs:%P1\n\t"		      \
 		       "movl %%edx,%%gs:%P2" :				      \
 		       : "A" (value),					      \
 			 "i" (offsetof (struct pthread, member)),	      \
@@ -362,12 +362,12 @@ union user_desc_init
 
 /* Set member of the thread descriptor directly.  */
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
   ({ if (sizeof (descr->member[0]) == 1)				      \
-       __asm__ volatile ("movb %b0,%%gs:%P1(%2)" :			      \
+       __asm__ __volatile__ ("movb %b0,%%gs:%P1(%2)" :			      \
 		     : "iq" (value),					      \
 		       "i" (offsetof (struct pthread, member)),		      \
 		       "r" (idx));					      \
     else if (sizeof (descr->member[0]) == 4)				      \
-       __asm__ volatile ("movl %0,%%gs:%P1(,%2,4)" :			      \
+       __asm__ __volatile__ ("movl %0,%%gs:%P1(,%2,4)" :		      \
 		     : "ir" (value),					      \
 		       "i" (offsetof (struct pthread, member)),		      \
 		       "r" (idx));					      \
@@ -378,7 +378,7 @@ union user_desc_init
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movl %%eax,%%gs:%P1(,%2,8)\n\t"		      \
+	 __asm__ __volatile__ ("movl %%eax,%%gs:%P1(,%2,8)\n\t"		      \
 		       "movl %%edx,%%gs:4+%P1(,%2,8)" :			      \
 		       : "A" (value),					      \
 			 "i" (offsetof (struct pthread, member)),	      \
@@ -391,7 +391,7 @@ union user_desc_init
   ({ __typeof (descr->member) __ret;					      \
     __typeof (oldval) __old = (oldval);					      \
     if (sizeof (descr->member) == 4)					      \
-       __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3"		      \
+       __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%gs:%P3"	      \
 		     : "=a" (__ret)					      \
 		     : "0" (__old), "r" (newval),			      \
 		       "i" (offsetof (struct pthread, member)));	      \
@@ -404,7 +404,7 @@ union user_desc_init
 /* Atomic logical and.  */
 #define THREAD_ATOMIC_AND(descr, member, val) \
   (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      __asm__ volatile (LOCK_PREFIX "andl %1, %%gs:%P0"		      \
+	      __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%gs:%P0"	      \
 			    :: "i" (offsetof (struct pthread, member)),	      \
 			       "ir" (val));				      \
 	    else							      \
@@ -415,7 +415,7 @@ union user_desc_init
 /* Atomic set bit.  */
 #define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
   (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      __asm__ volatile (LOCK_PREFIX "orl %1, %%gs:%P0"		      \
+	      __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%gs:%P0"	      \
 			    :: "i" (offsetof (struct pthread, member)),	      \
 			       "ir" (1 << (bit)));			      \
 	    else							      \
@@ -427,7 +427,7 @@ union user_desc_init
 #define CALL_THREAD_FCT(descr) \
   ({ void *__res;							      \
     int __ignore1, __ignore2;						      \
-     __asm__ volatile ("pushl %%eax\n\t"				      \
+     __asm__ __volatile__ ("pushl %%eax\n\t"				      \
 		   "pushl %%eax\n\t"					      \
 		   "pushl %%eax\n\t"					      \
 		   "pushl %%gs:%P4\n\t"					      \
@@ -462,7 +462,7 @@ union user_desc_init
 #define THREAD_GSCOPE_RESET_FLAG() \
   do									      \
     { int __res;							      \
-      __asm__ volatile ("xchgl %0, %%gs:%P1"				      \
+      __asm__ __volatile__ ("xchgl %0, %%gs:%P1"			      \
 		    : "=r" (__res)					      \
 		    : "i" (offsetof (struct pthread, header.gscope_flag)),    \
 		      "0" (THREAD_GSCOPE_FLAG_UNUSED));			      \
diff --git a/libpthread/nptl/sysdeps/pthread/pt-initfini.c b/libpthread/nptl/sysdeps/pthread/pt-initfini.c
index b26a50456..1f81144d0 100644
--- a/libpthread/nptl/sysdeps/pthread/pt-initfini.c
+++ b/libpthread/nptl/sysdeps/pthread/pt-initfini.c
@@ -43,13 +43,13 @@
 #define SECTION(x) __asm__ (".section " x )
 
 /* Embed an #include to pull in the alignment and .end directives. */
-asm ("\n#include \"defs.h\"");
+__asm__ ("\n#include \"defs.h\"");
 
 /* The initial common code ends here. */
-asm ("\n/*@HEADER_ENDS*/");
+__asm__ ("\n/*@HEADER_ENDS*/");
 
 /* To determine whether we need .end and .align: */
-asm ("\n/*@TESTS_BEGIN*/");
+__asm__ ("\n/*@TESTS_BEGIN*/");
 extern void dummy (void (*foo) (void));
 void
 dummy (void (*foo) (void))
@@ -57,10 +57,10 @@ dummy (void (*foo) (void))
   if (foo)
     (*foo) ();
 }
-asm ("\n/*@TESTS_END*/");
+__asm__ ("\n/*@TESTS_END*/");
 
 /* The beginning of _init:  */
-asm ("\n/*@_init_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_PROLOG_BEGINS*/");
 
 static void
 call_initialize_minimal (void)
@@ -79,18 +79,18 @@ _init (void)
   /* The very first thing we must do is to set up the registers.  */
   call_initialize_minimal ();
 
-  asm ("ALIGN");
-  asm("END_INIT");
+  __asm__ ("ALIGN");
+  __asm__("END_INIT");
   /* Now the epilog. */
-  asm ("\n/*@_init_PROLOG_ENDS*/");
-  asm ("\n/*@_init_EPILOG_BEGINS*/");
+  __asm__ ("\n/*@_init_PROLOG_ENDS*/");
+  __asm__ ("\n/*@_init_EPILOG_BEGINS*/");
   SECTION(".init");
 }
-asm ("END_INIT");
+__asm__ ("END_INIT");
 
 /* End of the _init epilog, beginning of the _fini prolog. */
-asm ("\n/*@_init_EPILOG_ENDS*/");
-asm ("\n/*@_fini_PROLOG_BEGINS*/");
+__asm__ ("\n/*@_init_EPILOG_ENDS*/");
+__asm__ ("\n/*@_fini_PROLOG_BEGINS*/");
 
 SECTION (".fini");
 extern void __attribute__ ((section (".fini"))) _fini (void);
@@ -99,9 +99,9 @@ _fini (void)
 {
 
   /* End of the _fini prolog. */
-  asm ("ALIGN");
-  asm ("END_FINI");
-  asm ("\n/*@_fini_PROLOG_ENDS*/");
+  __asm__ ("ALIGN");
+  __asm__ ("END_FINI");
+  __asm__ ("\n/*@_fini_PROLOG_ENDS*/");
 
   {
     /* Let GCC know that _fini is not a leaf function by having a dummy
@@ -112,14 +112,14 @@ _fini (void)
   }
 
   /* Beginning of the _fini epilog. */
-  asm ("\n/*@_fini_EPILOG_BEGINS*/");
+  __asm__ ("\n/*@_fini_EPILOG_BEGINS*/");
   SECTION (".fini");
 }
-asm ("END_FINI");
+__asm__ ("END_FINI");
 
 /* End of the _fini epilog.  Any further generated assembly (e.g. .ident)
    is shared between both crt files. */
-asm ("\n/*@_fini_EPILOG_ENDS*/");
-asm ("\n/*@TRAILER_BEGINS*/");
+__asm__ ("\n/*@_fini_EPILOG_ENDS*/");
+__asm__ ("\n/*@TRAILER_BEGINS*/");
 
 /* End of file. */
diff --git a/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
index 273c8bb3f..40b8d98c9 100644
--- a/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
+++ b/libpthread/nptl/sysdeps/pthread/unwind-forcedunwind.c
@@ -50,7 +50,7 @@ pthread_cancel_init (void)
   if (__builtin_expect (libgcc_s_handle != NULL, 1))
     {
       /* Force gcc to reload all values.  */
-      __asm__ volatile ("" ::: "memory");
+      __asm__ __volatile__ ("" ::: "memory");
       return;
     }
diff --git a/libpthread/nptl/sysdeps/sh/pthread_spin_lock.c b/libpthread/nptl/sysdeps/sh/pthread_spin_lock.c
index 2c72eb610..74fad3388 100644
--- a/libpthread/nptl/sysdeps/sh/pthread_spin_lock.c
+++ b/libpthread/nptl/sysdeps/sh/pthread_spin_lock.c
@@ -24,7 +24,7 @@ pthread_spin_lock (pthread_spinlock_t *lock)
   unsigned int val;
 
   do
-    __asm__ volatile ("tas.b @%1; movt %0"
+    __asm__ __volatile__ ("tas.b @%1; movt %0"
 		  : "=&r" (val)
 		  : "r" (lock)
 		  : "memory");
diff --git a/libpthread/nptl/sysdeps/sh/pthreaddef.h b/libpthread/nptl/sysdeps/sh/pthreaddef.h
index c1902fb54..dc6a4f907 100644
--- a/libpthread/nptl/sysdeps/sh/pthreaddef.h
+++ b/libpthread/nptl/sysdeps/sh/pthreaddef.h
@@ -41,9 +41,9 @@
 #define __exit_thread_inline(val) \
   while (1) {								      \
     if (__builtin_constant_p (val) && (val) == 0)			      \
-      __asm__ volatile ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+      __asm__ __volatile__ ("mov #0,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
 		   :: "i" (__NR_exit));					      \
     else								      \
-      __asm__ volatile ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
+      __asm__ __volatile__ ("mov %1,r4; mov %0,r3; trapa #0x11\n\t" SYSCALL_INST_PAD \
 		    :: "i" (__NR_exit), "r" (val));			      \
   }
diff --git a/libpthread/nptl/sysdeps/sh/tls.h b/libpthread/nptl/sysdeps/sh/tls.h
index 2c538eded..bbb11187a 100644
--- a/libpthread/nptl/sysdeps/sh/tls.h
+++ b/libpthread/nptl/sysdeps/sh/tls.h
@@ -94,7 +94,7 @@ typedef struct
 /* Install new dtv for current thread.  */
 # define INSTALL_NEW_DTV(dtv) \
   ({ tcbhead_t *__tcbp;							      \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));			      \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp));		      \
     __tcbp->dtv = (dtv);})
 
 /* Return dtv of given thread descriptor.  */
@@ -105,12 +105,12 @@ typedef struct
    special attention since 'errno' is not yet available and if the
    operation can cause a failure 'errno' must not be touched.  */
 # define TLS_INIT_TP(tcbp, secondcall) \
-  ({ __asm __volatile ("ldc %0,gbr" : : "r" (tcbp)); 0; })
+  ({ __asm__ __volatile__ ("ldc %0,gbr" : : "r" (tcbp)); 0; })
 
 /* Return the address of the dtv for the current thread.  */
 # define THREAD_DTV() \
   ({ tcbhead_t *__tcbp;							      \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));			      \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp));		      \
     __tcbp->dtv;})
 
 /* Return the thread descriptor for the current thread.
@@ -120,7 +120,7 @@ typedef struct
    do not get optimized away.  */
 # define THREAD_SELF \
   ({ struct pthread *__self;						      \
-     __asm ("stc gbr,%0" : "=r" (__self));				      \
+     __asm__ ("stc gbr,%0" : "=r" (__self));				      \
     __self - 1;})
 
 /* Magic for libthread_db to know how to do THREAD_SELF.  */
@@ -143,15 +143,15 @@ typedef struct
 
 #define THREAD_GET_POINTER_GUARD() \
   ({ tcbhead_t *__tcbp;							      \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));			      \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp));		      \
     __tcbp->pointer_guard;})
 #define THREAD_SET_POINTER_GUARD(value) \
   ({ tcbhead_t *__tcbp;							      \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));			      \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp));		      \
     __tcbp->pointer_guard = (value);})
 #define THREAD_COPY_POINTER_GUARD(descr) \
   ({ tcbhead_t *__tcbp;							      \
-     __asm __volatile ("stc gbr,%0" : "=r" (__tcbp));			      \
+     __asm__ __volatile__ ("stc gbr,%0" : "=r" (__tcbp));		      \
     ((tcbhead_t *) (descr + 1))->pointer_guard	= __tcbp->pointer_guard;})
 
 /* Get and set the global scope generation counter in struct pthread.  */
diff --git a/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_lock.c b/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_lock.c
index d3c6e3049..c263bb7c8 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_lock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_lock.c
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
     ("1: ldstub [%0], %%g2\n"
     "   orcc   %%g2, 0x0, %%g0\n"
     "   bne,a  2f\n"
diff --git a/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_trylock.c b/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_trylock.c
index bcc3158fd..2994085ab 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_trylock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc32/pthread_spin_trylock.c
@@ -24,6 +24,6 @@ int
 pthread_spin_trylock (pthread_spinlock_t *lock)
 {
   int res;
-  __asm __volatile ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
+  __asm__ __volatile__ ("ldstub [%1], %0" : "=r" (res) : "r" (lock) : "memory");
   return res == 0 ? 0 : EBUSY;
 }
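One deliberate exception survives the rename: THREAD_SELF stays a plain __asm__ with no __volatile__, and the tls.h comments are updated to say so. A sketch of the reasoning (my illustration, assuming an SH target like sh/tls.h above; struct pthread here is a stand-in for the real descriptor):

struct pthread { long header[4]; };   /* stand-in, not the real layout */

static inline struct pthread *
thread_self (void)
{
  struct pthread *self;
  /* Deliberately not __volatile__: an asm with outputs and no clobbers is
     treated as a pure function of its inputs, so repeated uses can be
     combined and a `pthread_descr self = thread_self ();' whose result is
     never used can be optimized away entirely -- exactly what the
     "must *not* be marked" comment asks for.  */
  __asm__ ("stc gbr,%0" : "=r" (self));
  return self - 1;   /* the descriptor sits just below the TCB */
}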
diff --git a/libpthread/nptl/sysdeps/sparc/sparc32/sparcv9/pthread_spin_lock.c b/libpthread/nptl/sysdeps/sparc/sparc32/sparcv9/pthread_spin_lock.c
index 8880f535b..0e18621e6 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc32/sparcv9/pthread_spin_lock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc32/sparcv9/pthread_spin_lock.c
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
     ("1: ldstub  [%0], %%g2\n"
     "   brnz,pn %%g2, 2f\n"
     "    membar #StoreLoad | #StoreStore\n"
diff --git a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_lock.c b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_lock.c
index 77171d9b9..53a3eab04 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_lock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_lock.c
@@ -22,7 +22,7 @@
 int
 pthread_spin_lock (pthread_spinlock_t *lock)
 {
-  __asm __volatile
+  __asm__ __volatile
     ("1: ldstub  [%0], %%g5\n"
     "   brnz,pn %%g5, 2f\n"
     "    membar #StoreLoad | #StoreStore\n"
diff --git a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_trylock.c b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_trylock.c
index 2bda809da..a8ef1c216 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_trylock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_trylock.c
@@ -24,7 +24,7 @@ int
 pthread_spin_trylock (pthread_spinlock_t *lock)
 {
   int res;
-  __asm __volatile
+  __asm__ __volatile
     ("ldstub [%1], %0\n"
     "membar #StoreLoad | #StoreStore"
     : "=r" (res)
diff --git a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_unlock.c b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_unlock.c
index 7037675a2..25a8888da 100644
--- a/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_unlock.c
+++ b/libpthread/nptl/sysdeps/sparc/sparc64/pthread_spin_unlock.c
@@ -24,7 +24,7 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  __asm __volatile ("membar #StoreStore | #LoadStore");
+  __asm__ __volatile__ ("membar #StoreStore | #LoadStore");
   *lock = 0;
   return 0;
 }
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
index 8adf8b44e..86b062804 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/alpha/pthread_once.c
@@ -49,7 +49,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
 	 Do this atomically.
       */
       newval = __fork_generation | 1;
-      __asm __volatile (
+      __asm__ __volatile__ (
 		"1:	ldl_l	%0, %2\n"
 		"	and	%0, 2, %1\n"
 		"	bne	%1, 2f\n"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
index b0586ea1e..8f63e2510 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/bits/atomic.h
@@ -72,11 +72,11 @@ void __arm_link_error (void);
 /* Thumb-2 has ldrex/strex.  However it does not have barrier instructions,
    so we still need to use the kernel helper.  */
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ register __typeof (oldval) a_oldval asm ("r0");			      \
-     register __typeof (oldval) a_newval asm ("r1") = (newval);		      \
-     register __typeof (mem) a_ptr asm ("r2") = (mem);			      \
-     register __typeof (oldval) a_tmp asm ("r3");			      \
-     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);	      \
+  ({ register __typeof (oldval) a_oldval __asm__ ("r0");		      \
+     register __typeof (oldval) a_newval __asm__ ("r1") = (newval);	      \
+     register __typeof (mem) a_ptr __asm__ ("r2") = (mem);		      \
+     register __typeof (oldval) a_tmp __asm__ ("r3");			      \
+     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval);	      \
      __asm__ __volatile__						      \
 	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
 	      "cmp\t%[tmp], %[old2]\n\t"				      \
@@ -95,11 +95,11 @@ void __arm_link_error (void);
      a_tmp; })
 #else
 #define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
-  ({ register __typeof (oldval) a_oldval asm ("r0");			      \
-     register __typeof (oldval) a_newval asm ("r1") = (newval);		      \
-     register __typeof (mem) a_ptr asm ("r2") = (mem);			      \
-     register __typeof (oldval) a_tmp asm ("r3");			      \
-     register __typeof (oldval) a_oldval2 asm ("r4") = (oldval);	      \
+  ({ register __typeof (oldval) a_oldval __asm__ ("r0");		      \
+     register __typeof (oldval) a_newval __asm__ ("r1") = (newval);	      \
+     register __typeof (mem) a_ptr __asm__ ("r2") = (mem);		      \
+     register __typeof (oldval) a_tmp __asm__ ("r3");			      \
+     register __typeof (oldval) a_oldval2 __asm__ ("r4") = (oldval);	      \
      __asm__ __volatile__						      \
 	     ("0:\tldr\t%[tmp],[%[ptr]]\n\t"				      \
 	      "cmp\t%[tmp], %[old2]\n\t"				      \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
index 011746d0e..b6253dd6b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c
@@ -40,7 +40,7 @@ pthread_cancel_init (void)
   if (__builtin_expect (libgcc_s_handle != NULL, 1))
     {
       /* Force gcc to reload all values.  */
-      asm volatile ("" ::: "memory");
+      __asm__ __volatile__ ("" ::: "memory");
       return;
     }
@@ -85,7 +85,7 @@ __unwind_freeres (void)
    ARM unwinder relies on register state at entrance.  So we write this in
    assembly.  */
 
-asm (
+__asm__ (
 "	.globl	_Unwind_Resume\n"
 "	.type	_Unwind_Resume, %function\n"
 "_Unwind_Resume:\n"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
index cdab10e30..afafcdacf 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/arm/unwind-resume.c
@@ -48,7 +48,7 @@ init (void)
    ARM unwinder relies on register state at entrance.  So we write this in
    assembly.  */
 
-asm (
+__asm__ (
 "	.globl	_Unwind_Resume\n"
 "	.type	_Unwind_Resume, %function\n"
 "_Unwind_Resume:\n"
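The ARM hunks above also rename the register-pinning declarations, register ... asm ("r0") becoming register ... __asm__ ("r0"). The construct is GCC's explicit register variable, which these macros use to place syscall and kernel-helper arguments without extra moves. A hedged sketch of the same idiom for a raw ARM EABI syscall (my example, with SYS_getpid standing in for SYS_futex; not code from the tree):

#include <sys/syscall.h>   /* SYS_getpid */

static long
raw_getpid (void)
{
  register long r7 __asm__ ("r7") = SYS_getpid;  /* EABI: syscall number in r7 */
  register long r0 __asm__ ("r0");               /* result comes back in r0 */
  __asm__ __volatile__ ("swi #0"
                        : "=r" (r0)
                        : "r" (r7)
                        : "memory");
  return r0;
}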
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index a40d84e75..ab1a9395e 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -210,7 +210,7 @@ LLL_STUB_UNWIND_INFO_END
   ({									      \
     int __status;							      \
     register __typeof (val) _val __asm__ ("edx") = (val);		      \
-    __asm__ __volatile (LLL_EBX_LOAD					      \
+    __asm__ __volatile__ (LLL_EBX_LOAD					      \
 		      LLL_ENTER_KERNEL					      \
 		      LLL_EBX_LOAD					      \
 		      : "=a" (__status)					      \
@@ -226,7 +226,7 @@ LLL_STUB_UNWIND_INFO_END
   do {									      \
     int __ignore;							      \
     register __typeof (nr) _nr __asm__ ("edx") = (nr);			      \
-    __asm__ __volatile (LLL_EBX_LOAD					      \
+    __asm__ __volatile__ (LLL_EBX_LOAD					      \
 		      LLL_ENTER_KERNEL					      \
 		      LLL_EBX_LOAD					      \
 		      : "=a" (__ignore)					      \
@@ -254,7 +254,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_trylock(futex) \
   ({ int ret;								      \
-     __asm__ __volatile (__lll_trylock_asm				      \
+     __asm__ __volatile__ (__lll_trylock_asm				      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
 			 "0" (LLL_LOCK_INITIALIZER),			      \
@@ -264,7 +264,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_trylock(futex, id) \
   ({ int ret;								      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (id), "m" (futex),				      \
 			 "0" (LLL_LOCK_INITIALIZER)			      \
@@ -274,7 +274,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_trylock(futex) \
   ({ int ret;								      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
 			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
@@ -294,7 +294,7 @@ LLL_STUB_UNWIND_INFO_END
   (void)								      \
     ({ int ignore1, ignore2;						      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm__ __volatile (__lll_lock_asm_start			      \
+	 __asm__ __volatile__ (__lll_lock_asm_start			      \
 			   "jnz _L_lock_%=\n\t"				      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_lock_%=,@function\n"		      \
@@ -313,7 +313,7 @@ LLL_STUB_UNWIND_INFO_END
       else								      \
 	 {								      \
 	   int ignore3;							      \
-	   __asm__ __volatile (__lll_lock_asm_start			      \
+	   __asm__ __volatile__ (__lll_lock_asm_start			      \
 			     "jnz _L_lock_%=\n\t"			      \
 			     ".subsection 1\n\t"			      \
 			     ".type _L_lock_%=,@function\n"		      \
@@ -337,7 +337,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_lock(futex, id, private) \
   ({ int __result, ignore1, ignore2;					      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
 		       "jnz _L_robust_lock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_robust_lock_%=,@function\n"		      \
@@ -362,7 +362,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_lock(futex, private) \
   (void)								      \
     ({ int ignore1, ignore2, ignore3;					      \
-       __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
+       __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
 			 "jnz _L_cond_lock_%=\n\t"			      \
 			 ".subsection 1\n\t"				      \
 			 ".type _L_cond_lock_%=,@function\n"		      \
@@ -384,7 +384,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int __result, ignore1, ignore2;					      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %2\n\t"		      \
 		       "jnz _L_robust_cond_lock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_robust_cond_lock_%=,@function\n"	      \
@@ -407,7 +407,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int __result, ignore1, ignore2, ignore3;				      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t"		      \
 		       "jnz _L_timedlock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_timedlock_%=,@function\n"		      \
@@ -430,7 +430,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int __result, ignore1, ignore2, ignore3;				      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %3\n\t"		      \
 		       "jnz _L_robust_timedlock_%=\n\t"			      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_robust_timedlock_%=,@function\n"	      \
@@ -463,7 +463,7 @@ LLL_STUB_UNWIND_INFO_END
   (void)								      \
     ({ int ignore;							      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm__ __volatile (__lll_unlock_asm				      \
+	 __asm__ __volatile__ (__lll_unlock_asm				      \
 			   "jne _L_unlock_%=\n\t"			      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_unlock_%=,@function\n"		      \
@@ -481,7 +481,7 @@ LLL_STUB_UNWIND_INFO_END
       else								      \
 	 {								      \
 	   int ignore2;							      \
-	   __asm__ __volatile (__lll_unlock_asm				      \
+	   __asm__ __volatile__ (__lll_unlock_asm			      \
 			     "jne _L_unlock_%=\n\t"			      \
 			     ".subsection 1\n\t"			      \
 			     ".type _L_unlock_%=,@function\n"		      \
@@ -504,7 +504,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_unlock(futex, private) \
   (void)								      \
     ({ int ignore, ignore2;						      \
-       __asm__ __volatile (LOCK_INSTR "andl %3, %0\n\t"			      \
+       __asm__ __volatile__ (LOCK_INSTR "andl %3, %0\n\t"		      \
 			 "jne _L_robust_unlock_%=\n\t"			      \
 			 ".subsection 1\n\t"				      \
 			 ".type _L_robust_unlock_%=,@function\n"	      \
@@ -528,7 +528,7 @@ LLL_STUB_UNWIND_INFO_END
   (void)								      \
     ({ int __ignore;							      \
       register int _nr __asm__ ("edx") = 1;				      \
-       __asm__ __volatile (LOCK_INSTR "orl %5, (%2)\n\t"		      \
+       __asm__ __volatile__ (LOCK_INSTR "orl %5, (%2)\n\t"		      \
 			 LLL_EBX_LOAD					      \
 			 LLL_ENTER_KERNEL				      \
 			 LLL_EBX_LOAD					      \
@@ -553,7 +553,7 @@ LLL_STUB_UNWIND_INFO_END
     int __ignore;							      \
     register __typeof (tid) _tid __asm__ ("edx") = (tid);		      \
     if (_tid != 0)							      \
-      __asm__ __volatile (LLL_EBX_LOAD					      \
+      __asm__ __volatile__ (LLL_EBX_LOAD				      \
 			"1:\tmovl %1, %%eax\n\t"			      \
 			LLL_ENTER_KERNEL				      \
 			"cmpl $0, (%%ebx)\n\t"				      \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
index ecfa9702e..2c2557d56 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -164,7 +164,7 @@
 /* Set *futex to ID if it is 0, atomically.  Returns the old value */
 #define __lll_robust_trylock(futex, id) \
   ({ int __val;								      \
-     __asm __volatile ("1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n"	      \
+     __asm__ __volatile__ ("1:	lwarx	%0,0,%2" MUTEX_HINT_ACQ "\n"	      \
 		       "	cmpwi	0,%0,0\n"			      \
 		       "	bne	2f\n"				      \
 		       "	stwcx.	%3,0,%2\n"			      \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
index bc5f3f0f1..a495b64f3 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_once.c
@@ -52,7 +52,7 @@ __pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
 	 Do this atomically.
       */
       newval = __fork_generation | 1;
-      __asm __volatile ("1:	lwarx	%0,0,%3\n"
+      __asm__ __volatile__ ("1:	lwarx	%0,0,%3\n"
 			"	andi.	%1,%0,2\n"
 			"	bne	2f\n"
 			"	stwcx.	%4,0,%3\n"
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
index 90f2dc67c..4be31ff6b 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/pthread_spin_unlock.c
@@ -23,7 +23,7 @@
 int
 pthread_spin_unlock (pthread_spinlock_t *lock)
 {
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm__ __volatile__ (__lll_rel_instr ::: "memory");
   *lock = 0;
   return 0;
 }
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
index 0082c570a..cdc8be7e7 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/powerpc/sem_post.c
@@ -29,9 +29,9 @@ __new_sem_post (sem_t *sem)
 {
   struct new_sem *isem = (struct new_sem *) sem;
 
-  __asm __volatile (__lll_rel_instr ::: "memory");
+  __asm__ __volatile__ (__lll_rel_instr ::: "memory");
   atomic_increment (&isem->value);
-  __asm __volatile (__lll_acq_instr ::: "memory");
+  __asm__ __volatile__ (__lll_acq_instr ::: "memory");
   if (isem->nwaiters > 0)
     {
       int err = lll_futex_wake (&isem->value, 1,
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
index 19ce7fe40..0ea67e0ef 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/sh/lowlevellock.h
@@ -99,7 +99,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_trylock(futex) \
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -121,7 +121,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_trylock(futex, id)	\
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -143,7 +143,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_cond_trylock(futex) \
   ({ unsigned char __result; \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -165,7 +165,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-	    __asm __volatile ("\
+	    __asm__ __volatile__ ("\
 		.align 2\n\
 		mova 1f,r0\n\
 		nop\n\
@@ -190,7 +190,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_lock(futex, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -211,7 +211,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
    always wakeup waiters.  */
 #define lll_cond_lock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-	    __asm __volatile ("\
+	    __asm__ __volatile__ ("\
 		.align 2\n\
 		mova 1f,r0\n\
 		nop\n\
@@ -229,7 +229,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -248,7 +248,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -267,7 +267,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int __result, *__futex = &(futex); \
-     __asm __volatile ("\
+     __asm__ __volatile__ ("\
 	.align 2\n\
 	mova 1f,r0\n\
 	nop\n\
@@ -287,7 +287,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_unlock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-	    __asm __volatile ("\
+	    __asm__ __volatile__ ("\
 		.align 2\n\
 		mova 1f,r0\n\
 		mov r15,r1\n\
@@ -310,7 +310,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_unlock(futex, private) \
   (void) ({ int __result, *__futex = &(futex); \
-	    __asm __volatile ("\
+	    __asm__ __volatile__ ("\
 		.align 2\n\
 		mova 1f,r0\n\
 		mov r15,r1\n\
@@ -326,7 +326,7 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 
 #define lll_robust_dead(futex, private)		       \
   (void) ({ int __ignore, *__futex = &(futex); \
-	    __asm __volatile ("\
+	    __asm__ __volatile__ ("\
 		.align 2\n\
 		mova 1f,r0\n\
 		mov r15,r1\n\
@@ -354,13 +354,13 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 #define lll_futex_timed_wait(futex, val, timeout, private) \
   ({									      \
     int __status;							      \
-    register unsigned long __r3 __asm ("r3") = SYS_futex;		      \
-    register unsigned long __r4 __asm ("r4") = (unsigned long) (futex);	      \
-    register unsigned long __r5 __asm ("r5")				      \
+    register unsigned long __r3 __asm__ ("r3") = SYS_futex;		      \
+    register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex);     \
+    register unsigned long __r5 __asm__ ("r5")				      \
       = __lll_private_flag (FUTEX_WAIT, private);			      \
-    register unsigned long __r6 __asm ("r6") = (unsigned long) (val);	      \
-    register unsigned long __r7 __asm ("r7") = (timeout);		      \
-    __asm __volatile (SYSCALL_WITH_INST_PAD				      \
+    register unsigned long __r6 __asm__ ("r6") = (unsigned long) (val);	      \
+    register unsigned long __r7 __asm__ ("r7") = (timeout);		      \
+    __asm__ __volatile__ (SYSCALL_WITH_INST_PAD				      \
 		      : "=z" (__status)					      \
 		      : "r" (__r3), "r" (__r4), "r" (__r5),		      \
 			"r" (__r6), "r" (__r7)				      \
@@ -372,13 +372,13 @@ extern int __lll_unlock_wake (int *__futex, int private) attribute_hidden;
 #define lll_futex_wake(futex, nr, private) \
   do {									      \
     int __ignore;							      \
-    register unsigned long __r3 __asm ("r3") = SYS_futex;		      \
-    register unsigned long __r4 __asm ("r4") = (unsigned long) (futex);	      \
-    register unsigned long __r5 __asm ("r5")				      \
+    register unsigned long __r3 __asm__ ("r3") = SYS_futex;		      \
+    register unsigned long __r4 __asm__ ("r4") = (unsigned long) (futex);     \
+    register unsigned long __r5 __asm__ ("r5")				      \
      = __lll_private_flag (FUTEX_WAKE, private);			      \
-    register unsigned long __r6 __asm ("r6") = (unsigned long) (nr);	      \
-    register unsigned long __r7 __asm ("r7") = 0;			      \
-    __asm __volatile (SYSCALL_WITH_INST_PAD				      \
+    register unsigned long __r6 __asm__ ("r6") = (unsigned long) (nr);	      \
+    register unsigned long __r7 __asm__ ("r7") = 0;			      \
+    __asm__ __volatile__ (SYSCALL_WITH_INST_PAD				      \
 		      : "=z" (__ignore)					      \
 		      : "r" (__r3), "r" (__r4), "r" (__r5),		      \
 			"r" (__r6), "r" (__r7)				      \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index 7c042fc80..ffab81eb5 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -212,7 +212,7 @@ LLL_STUB_UNWIND_INFO_END
     register const struct timespec *__to __asm__ ("r10") = timeout;	      \
     int __status;							      \
     register __typeof (val) _val __asm__ ("edx") = (val);		      \
-    __asm__ __volatile ("syscall"					      \
+    __asm__ __volatile__ ("syscall"					      \
 		      : "=a" (__status)					      \
 		      : "0" (SYS_futex), "D" (futex),			      \
 			"S" (__lll_private_flag (FUTEX_WAIT, private)),	      \
@@ -226,7 +226,7 @@ LLL_STUB_UNWIND_INFO_END
   do {									      \
     int __ignore;							      \
     register __typeof (nr) _nr __asm__ ("edx") = (nr);			      \
-    __asm__ __volatile ("syscall"					      \
+    __asm__ __volatile__ ("syscall"					      \
 		      : "=a" (__ignore)					      \
 		      : "0" (SYS_futex), "D" (futex),			      \
 			"S" (__lll_private_flag (FUTEX_WAKE, private)),	      \
@@ -253,7 +253,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_trylock(futex) \
   ({ int ret;								      \
-     __asm__ __volatile (__lll_trylock_asm				      \
+     __asm__ __volatile__ (__lll_trylock_asm				      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (LLL_LOCK_INITIALIZER_LOCKED), "m" (futex),      \
 			 "0" (LLL_LOCK_INITIALIZER)			      \
@@ -262,7 +262,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_trylock(futex, id) \
   ({ int ret;								      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (id), "m" (futex),	"0" (LLL_LOCK_INITIALIZER)    \
 		       : "memory");					      \
@@ -270,7 +270,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_trylock(futex) \
   ({ int ret;								      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %2, %1"			      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %2, %1"			      \
 		       : "=a" (ret), "=m" (futex)			      \
 		       : "r" (LLL_LOCK_INITIALIZER_WAITERS),		      \
 			 "m" (futex), "0" (LLL_LOCK_INITIALIZER)	      \
@@ -294,7 +294,7 @@ LLL_STUB_UNWIND_INFO_END
   (void)								      \
     ({ int ignore1, ignore2, ignore3;					      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm__ __volatile (__lll_lock_asm_start			      \
+	 __asm__ __volatile__ (__lll_lock_asm_start			      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_lock_%=, @function\n"		      \
 			   "_L_lock_%=:\n"				      \
@@ -312,7 +312,7 @@ LLL_STUB_UNWIND_INFO_END
 			   : "0" (1), "m" (futex), "3" (0)		      \
 			   : "cx", "r11", "cc", "memory");		      \
       else								      \
-	 __asm__ __volatile (__lll_lock_asm_start			      \
+	 __asm__ __volatile__ (__lll_lock_asm_start			      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_lock_%=, @function\n"		      \
 			   "_L_lock_%=:\n"				      \
@@ -333,7 +333,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_lock(futex, id, private) \
   ({ int result, ignore1, ignore2;					      \
-    __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+    __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
 		      "jnz 1f\n\t"					      \
 		      ".subsection 1\n\t"				      \
 		      ".type _L_robust_lock_%=, @function\n"		      \
@@ -356,7 +356,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_cond_lock(futex, private) \
   (void)								      \
    ({ int ignore1, ignore2, ignore3;					      \
-       __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+       __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
 			 "jnz 1f\n\t"					      \
 			 ".subsection 1\n\t"				      \
 			 ".type _L_cond_lock_%=, @function\n"		      \
@@ -378,7 +378,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_cond_lock(futex, id, private) \
   ({ int result, ignore1, ignore2;					      \
-    __asm__ __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
+    __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %4, %2\n\t"		      \
 		      "jnz 1f\n\t"					      \
 		      ".subsection 1\n\t"				      \
 		      ".type _L_robust_cond_lock_%=, @function\n"	      \
@@ -401,7 +401,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_timedlock(futex, timeout, private) \
   ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t"		      \
 		       "jnz 1f\n\t"					      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_timedlock_%=, @function\n"		      \
@@ -425,7 +425,7 @@ LLL_STUB_UNWIND_INFO_END
 
 #define lll_robust_timedlock(futex, timeout, id, private) \
   ({ int result, ignore1, ignore2, ignore3;				      \
-     __asm__ __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t"		      \
+     __asm__ __volatile__ (LOCK_INSTR "cmpxchgl %1, %4\n\t"		      \
 		       "jnz 1f\n\t"					      \
 		       ".subsection 1\n\t"				      \
 		       ".type _L_robust_timedlock_%=, @function\n"	      \
@@ -464,7 +464,7 @@ LLL_STUB_UNWIND_INFO_END
   (void)								      \
     ({ int ignore;							      \
       if (__builtin_constant_p (private) && (private) == LLL_PRIVATE)	      \
-	 __asm__ __volatile (__lll_unlock_asm_start			      \
+	 __asm__ __volatile__ (__lll_unlock_asm_start			      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_unlock_%=, @function\n"		      \
 			   "_L_unlock_%=:\n"				      \
@@ -481,7 +481,7 @@ LLL_STUB_UNWIND_INFO_END
 			   : "m" (futex)				      \
 			   : "ax", "cx", "r11", "cc", "memory");	      \
       else								      \
-	 __asm__ __volatile (__lll_unlock_asm_start			      \
+	 __asm__ __volatile__ (__lll_unlock_asm_start			      \
 			   ".subsection 1\n\t"				      \
 			   ".type _L_unlock_%=, @function\n"		      \
 			   "_L_unlock_%=:\n"				      \
@@ -503,7 +503,7 @@ LLL_STUB_UNWIND_INFO_END
   do									      \
     {									      \
       int ignore;							      \
-      __asm__ __volatile (LOCK_INSTR "andl %2, %0\n\t"			      \
+      __asm__ __volatile__ (LOCK_INSTR "andl %2, %0\n\t"		      \
 			"jne 1f\n\t"					      \
 			".subsection 1\n\t"				      \
 			".type _L_robust_unlock_%=, @function\n"	      \
@@ -528,7 +528,7 @@ LLL_STUB_UNWIND_INFO_END
   do									      \
     {									      \
      int ignore;							      \
-      __asm__ __volatile (LOCK_INSTR "orl %3, (%2)\n\t"			      \
+      __asm__ __volatile__ (LOCK_INSTR "orl %3, (%2)\n\t"		      \
 			"syscall"					      \
 			: "=m" (futex), "=a" (ignore)			      \
 			: "D" (&(futex)), "i" (FUTEX_OWNER_DIED),	      \
@@ -544,7 +544,7 @@ LLL_STUB_UNWIND_INFO_END
     register int __nr_move __asm__ ("r10") = nr_move;			      \
     register void *__mutex __asm__ ("r8") = mutex;			      \
     register int __val __asm__ ("r9") = val;				      \
-     __asm__ __volatile ("syscall"					      \
+     __asm__ __volatile__ ("syscall"					      \
 		       : "=a" (__res)					      \
 		       : "0" (__NR_futex), "D" ((void *) ftx),		      \
 			 "S" (__lll_private_flag (FUTEX_CMP_REQUEUE,	      \
@@ -568,7 +568,7 @@ LLL_STUB_UNWIND_INFO_END
     int __ignore;							      \
     register __typeof (tid) _tid __asm__ ("edx") = (tid);		      \
     if (_tid != 0)							      \
-      __asm__ __volatile ("xorq %%r10, %%r10\n\t"			      \
+      __asm__ __volatile__ ("xorq %%r10, %%r10\n\t"			      \
 			"1:\tmovq %2, %%rax\n\t"			      \
 			"syscall\n\t"					      \
 			"cmpl $0, (%%rdi)\n\t"				      \
diff --git a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
index 640d3044f..e60113a76 100644
--- a/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
+++ b/libpthread/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_setaffinity.c
@@ -2,7 +2,7 @@
 
 #define RESET_VGETCPU_CACHE() \
   do {			      \
-    asm volatile ("movl %0, %%fs:%P1\n\t"				      \
+    __asm__ __volatile__ ("movl %0, %%fs:%P1\n\t"			      \
 		  "movl %0, %%fs:%P2"					      \
 		  :							      \
 		  : "ir" (0), "i" (offsetof (struct pthread,		      \
diff --git a/libpthread/nptl/sysdeps/x86_64/pthreaddef.h b/libpthread/nptl/sysdeps/x86_64/pthreaddef.h
index b33c18638..36e994d86 100644
--- a/libpthread/nptl/sysdeps/x86_64/pthreaddef.h
+++ b/libpthread/nptl/sysdeps/x86_64/pthreaddef.h
@@ -40,4 +40,4 @@
 
 /* While there is no such syscall.  */
 #define __exit_thread_inline(val) \
-  __asm__ volatile ("syscall" :: "a" (__NR_exit), "D" (val))
+  __asm__ __volatile__ ("syscall" :: "a" (__NR_exit), "D" (val))
diff --git a/libpthread/nptl/sysdeps/x86_64/tls.h b/libpthread/nptl/sysdeps/x86_64/tls.h
index 396ad4213..afaf1597a 100644
--- a/libpthread/nptl/sysdeps/x86_64/tls.h
+++ b/libpthread/nptl/sysdeps/x86_64/tls.h
@@ -170,7 +170,7 @@ typedef struct
      _head->self = _thrdescr;						      \
 									      \
      /* It is a simple syscall to set the %fs value for the thread.  */      \
-     __asm__ volatile ("syscall"					      \
+     __asm__ __volatile__ ("syscall"					      \
 		   : "=a" (_result)					      \
 		   : "0" ((unsigned long int) __NR_arch_prctl),		      \
 		     "D" ((unsigned long int) ARCH_SET_FS),		      \
@@ -189,7 +189,7 @@ typedef struct
 
 /* Return the thread descriptor for the current thread.
-   The contained asm must *not* be marked volatile since otherwise
+   The contained asm must *not* be marked __volatile__ since otherwise
    assignments like
 	pthread_descr self = thread_self();
    do not get optimized away.  */
@@ -207,11 +207,11 @@ typedef struct
 # define THREAD_GETMEM(descr, member) \
   ({ __typeof (descr->member) __value;					      \
     if (sizeof (__value) == 1)						      \
-       __asm__ volatile ("movb %%fs:%P2,%b0"				      \
+       __asm__ __volatile__ ("movb %%fs:%P2,%b0"			      \
 		     : "=q" (__value)					      \
 		     : "0" (0), "i" (offsetof (struct pthread, member)));     \
     else if (sizeof (__value) == 4)					      \
-       __asm__ volatile ("movl %%fs:%P1,%0"				      \
+       __asm__ __volatile__ ("movl %%fs:%P1,%0"				      \
 		     : "=r" (__value)					      \
 		     : "i" (offsetof (struct pthread, member)));	      \
     else								      \
@@ -221,7 +221,7 @@ typedef struct
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movq %%fs:%P1,%q0"				      \
+	 __asm__ __volatile__ ("movq %%fs:%P1,%q0"			      \
 		       : "=r" (__value)					      \
 		       : "i" (offsetof (struct pthread, member)));	      \
        }								      \
@@ -232,12 +232,12 @@ typedef struct
 # define THREAD_GETMEM_NC(descr, member, idx) \
   ({ __typeof (descr->member[0]) __value;				      \
     if (sizeof (__value) == 1)						      \
-       __asm__ volatile ("movb %%fs:%P2(%q3),%b0"			      \
+       __asm__ __volatile__ ("movb %%fs:%P2(%q3),%b0"			      \
 		     : "=q" (__value)					      \
 		     : "0" (0), "i" (offsetof (struct pthread, member[0])),   \
 		       "r" (idx));					      \
     else if (sizeof (__value) == 4)					      \
-       __asm__ volatile ("movl %%fs:%P1(,%q2,4),%0"			      \
+       __asm__ __volatile__ ("movl %%fs:%P1(,%q2,4),%0"			      \
 		     : "=r" (__value)					      \
 		     : "i" (offsetof (struct pthread, member[0])), "r" (idx));\
     else								      \
@@ -247,7 +247,7 @@ typedef struct
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movq %%fs:%P1(,%q2,8),%q0"			      \
+	 __asm__ __volatile__ ("movq %%fs:%P1(,%q2,8),%q0"		      \
 		       : "=r" (__value)					      \
 		       : "i" (offsetof (struct pthread, member[0])),	      \
 			 "r" (idx));					      \
@@ -267,11 +267,11 @@ typedef struct
 
 /* Same as THREAD_SETMEM, but the member offset can be non-constant.  */
 # define THREAD_SETMEM(descr, member, value) \
   ({ if (sizeof (descr->member) == 1)					      \
-       __asm__ volatile ("movb %b0,%%fs:%P1" :				      \
+       __asm__ __volatile__ ("movb %b0,%%fs:%P1" :			      \
 		     : "iq" (value),					      \
 		       "i" (offsetof (struct pthread, member)));	      \
     else if (sizeof (descr->member) == 4)				      \
-       __asm__ volatile ("movl %0,%%fs:%P1" :				      \
+       __asm__ __volatile__ ("movl %0,%%fs:%P1" :			      \
 		     : IMM_MODE (value),				      \
 		       "i" (offsetof (struct pthread, member)));	      \
     else								      \
@@ -281,7 +281,7 @@ typedef struct
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movq %q0,%%fs:%P1" :			      \
+	 __asm__ __volatile__ ("movq %q0,%%fs:%P1" :			      \
 		       : IMM_MODE ((unsigned long int) value),		      \
 			 "i" (offsetof (struct pthread, member)));	      \
        }})
@@ -290,12 +290,12 @@ typedef struct
 
 /* Set member of the thread descriptor directly.  */
 # define THREAD_SETMEM_NC(descr, member, idx, value) \
  ({ if (sizeof (descr->member[0]) == 1)				      \
-       __asm__ volatile ("movb %b0,%%fs:%P1(%q2)" :			      \
+       __asm__ __volatile__ ("movb %b0,%%fs:%P1(%q2)" :			      \
 		     : "iq" (value),					      \
 		       "i" (offsetof (struct pthread, member[0])),	      \
 		       "r" (idx));					      \
     else if (sizeof (descr->member[0]) == 4)				      \
-       __asm__ volatile ("movl %0,%%fs:%P1(,%q2,4)" :			      \
+       __asm__ __volatile__ ("movl %0,%%fs:%P1(,%q2,4)" :		      \
 		     : IMM_MODE (value),				      \
 		       "i" (offsetof (struct pthread, member[0])),	      \
 		       "r" (idx));					      \
    else									      \
@@ -306,7 +306,7 @@ typedef struct
 	      4 or 8.  */						      \
 	   abort ();							      \
 									      \
-	 __asm__ volatile ("movq %q0,%%fs:%P1(,%q2,8)" :		      \
+	 __asm__ __volatile__ ("movq %q0,%%fs:%P1(,%q2,8)" :		      \
 		       : IMM_MODE ((unsigned long int) value),		      \
 			 "i" (offsetof (struct pthread, member[0])),	      \
 			 "r" (idx));					      \
@@ -318,7 +318,7 @@ typedef struct
   ({ __typeof (descr->member) __ret;					      \
     __typeof (oldval) __old = (oldval);					      \
     if (sizeof (descr->member) == 4)					      \
-       __asm__ volatile (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3"		      \
+       __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %%fs:%P3"	      \
 		     : "=a" (__ret)					      \
 		     : "0" (__old), "r" (newval),			      \
 		       "i" (offsetof (struct pthread, member)));	      \
@@ -331,7 +331,7 @@ typedef struct
 /* Atomic logical and.  */
 # define THREAD_ATOMIC_AND(descr, member, val) \
   (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      __asm__ volatile (LOCK_PREFIX "andl %1, %%fs:%P0"		      \
+	      __asm__ __volatile__ (LOCK_PREFIX "andl %1, %%fs:%P0"	      \
 			    :: "i" (offsetof (struct pthread, member)),	      \
 			       "ir" (val));				      \
 	    else							      \
@@ -342,7 +342,7 @@ typedef struct
 /* Atomic set bit.  */
 # define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
   (void) ({ if (sizeof ((descr)->member) == 4)				      \
-	      __asm__ volatile (LOCK_PREFIX "orl %1, %%fs:%P0"		      \
+	      __asm__ __volatile__ (LOCK_PREFIX "orl %1, %%fs:%P0"	      \
 			    :: "i" (offsetof (struct pthread, member)),	      \
 			       "ir" (1 << (bit)));			      \
 	    else							      \
@@ -352,7 +352,7 @@ typedef struct
 
 # define CALL_THREAD_FCT(descr) \
   ({ void *__res;							      \
-     __asm__ volatile ("movq %%fs:%P2, %%rdi\n\t"			      \
+     __asm__ __volatile__ ("movq %%fs:%P2, %%rdi\n\t"			      \
 		   "callq *%%fs:%P1"					      \
 		   : "=a" (__res)					      \
 		   : "i" (offsetof (struct pthread, start_routine)),	      \
@@ -385,7 +385,7 @@ typedef struct
 # define THREAD_GSCOPE_RESET_FLAG() \
   do									      \
     { int __res;							      \
-      __asm__ volatile ("xchgl %0, %%fs:%P1"				      \
+      __asm__ __volatile__ ("xchgl %0, %%fs:%P1"			      \
 		    : "=r" (__res)					      \
 		    : "i" (offsetof (struct pthread, header.gscope_flag)),    \
 		      "0" (THREAD_GSCOPE_FLAG_UNUSED));			      \
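A closing note on the hunks that touch the empty asm with a "memory" clobber (the "Force gcc to reload all values" sites above): that form is a pure compiler barrier. A short sketch, assuming GCC semantics:

/* No instruction is emitted, but GCC must assume all of memory changed,
   so values cached in registers are spilled before the barrier and
   reloaded after it.  */
#define compiler_barrier() __asm__ __volatile__ ("" ::: "memory")

static int flag;

static void
wait_for_flag (void)
{
  /* Illustration only -- real code would use atomics.  The barrier makes
     GCC re-read `flag' from memory on every iteration instead of caching
     it in a register.  */
  while (flag == 0)
    compiler_barrier ();
}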
