Diffstat (limited to 'libc')
-rw-r--r--  libc/string/arm/_memcpy.S                    8
-rw-r--r--  libc/string/arm/bcopy.S                      8
-rw-r--r--  libc/string/arm/bzero.S                      8
-rw-r--r--  libc/string/arm/memcmp.S                    12
-rw-r--r--  libc/string/arm/memcpy.S                     8
-rw-r--r--  libc/string/arm/memmove.S                    8
-rw-r--r--  libc/string/arm/memset.S                    11
-rw-r--r--  libc/string/arm/strcmp.S                    12
-rw-r--r--  libc/string/arm/strlen.S                    11
-rw-r--r--  libc/string/arm/strncmp.S                   15
-rw-r--r--  libc/sysdeps/linux/powerpc/bits/atomic.h   607
-rw-r--r--  libc/sysdeps/linux/powerpc/bits/syscalls.h 160
12 files changed, 782 insertions, 86 deletions
diff --git a/libc/string/arm/_memcpy.S b/libc/string/arm/_memcpy.S index 7bd4b0776..236500e5f 100644 --- a/libc/string/arm/_memcpy.S +++ b/libc/string/arm/_memcpy.S @@ -71,10 +71,10 @@ * Apologies for the state of the comments ;-) */ - .text - .global _memcpy; - .type _memcpy,%function - .align 4; \ +.text +.global _memcpy +.type _memcpy,%function +.align 4 _memcpy: /* Determine copy direction */ diff --git a/libc/string/arm/bcopy.S b/libc/string/arm/bcopy.S index c256df3f3..9e3efc7d0 100644 --- a/libc/string/arm/bcopy.S +++ b/libc/string/arm/bcopy.S @@ -39,10 +39,10 @@ /* bcopy = memcpy/memmove with arguments reversed. */ - .text - .global bcopy; - .type bcopy,%function - .align 4; \ +.text +.global bcopy +.type bcopy,%function +.align 4 bcopy: /* switch the source and destination registers */ diff --git a/libc/string/arm/bzero.S b/libc/string/arm/bzero.S index 253b3c88b..0e96ba076 100644 --- a/libc/string/arm/bzero.S +++ b/libc/string/arm/bzero.S @@ -37,10 +37,10 @@ * by Erik Andersen <andersen@codepoet.org> */ - .text - .global __bzero; - .type __bzero,%function - .align 4; \ +.text +.global __bzero +.type __bzero,%function +.align 4 __bzero: mov r2, r1 diff --git a/libc/string/arm/memcmp.S b/libc/string/arm/memcmp.S index e0f21aef4..6f7a064bd 100644 --- a/libc/string/arm/memcmp.S +++ b/libc/string/arm/memcmp.S @@ -30,10 +30,10 @@ */ - .text - .global memcmp; - .type memcmp,%function - .align 4; \ +.text +.global memcmp +.type memcmp,%function +.align 4 memcmp: /* if ((len - 1) < 0) return 0 */ @@ -52,6 +52,4 @@ memcmp: sub r0, r2, r3 mov pc, lr -.weak bcmp; - bcmp = memcmp - +.weak bcmp ; bcmp = memcmp diff --git a/libc/string/arm/memcpy.S b/libc/string/arm/memcpy.S index cc9a43cb4..4869007d3 100644 --- a/libc/string/arm/memcpy.S +++ b/libc/string/arm/memcpy.S @@ -37,10 +37,10 @@ * by Erik Andersen <andersen@codepoet.org> */ - .text - .global memcpy; - .type memcpy,%function - .align 4; \ +.text +.global memcpy +.type memcpy,%function +.align 4 memcpy: stmfd sp!, {r0, lr} diff --git a/libc/string/arm/memmove.S b/libc/string/arm/memmove.S index 84e3e518c..9dbd3a004 100644 --- a/libc/string/arm/memmove.S +++ b/libc/string/arm/memmove.S @@ -37,10 +37,10 @@ * by Erik Andersen <andersen@codepoet.org> */ - .text - .global memmove; - .type memmove,%function - .align 4; \ +.text +.global memmove +.type memmove,%function +.align 4 memmove: stmfd sp!, {r0, lr} diff --git a/libc/string/arm/memset.S b/libc/string/arm/memset.S index 07d3913f9..097c5b92a 100644 --- a/libc/string/arm/memset.S +++ b/libc/string/arm/memset.S @@ -19,10 +19,10 @@ #include <sys/syscall.h> - .text - .global memset; - .type memset,%function - .align 4; \ +.text +.global memset +.type memset,%function +.align 4 memset: mov a4, a1 @@ -68,5 +68,4 @@ memset: strb a2, [a4], $1 mov pc, lr -.size memset,.-memset; - +.size memset,.-memset diff --git a/libc/string/arm/strcmp.S b/libc/string/arm/strcmp.S index b2f26d63a..1f486546c 100644 --- a/libc/string/arm/strcmp.S +++ b/libc/string/arm/strcmp.S @@ -29,10 +29,10 @@ * by Erik Andersen <andersen@codepoet.org> */ - .text - .global strcmp; - .type strcmp,%function - .align 4; \ +.text +.global strcmp +.type strcmp,%function +.align 4 strcmp: 1: @@ -44,6 +44,4 @@ strcmp: sub r0, r2, r3 mov pc, lr -.weak strcoll; - strcoll = strcmp - +.weak strcoll ; strcoll = strcmp diff --git a/libc/string/arm/strlen.S b/libc/string/arm/strlen.S index 65ee94d99..195276e30 100644 --- a/libc/string/arm/strlen.S +++ b/libc/string/arm/strlen.S @@ -25,10 +25,10 @@ * exit: r0 = len */ - .text - .global 
strlen;
-	.type strlen,%function
-	.align 4; \
+.text
+.global strlen
+.type strlen,%function
+.align 4
 strlen:
 	bic r1, r0, $3 @ addr of word containing first byte
@@ -76,5 +76,4 @@ Llastword: @ drop through to here once we find a
 #endif
 	mov pc,lr
-.size strlen,.-strlen;
-
+.size strlen,.-strlen
diff --git a/libc/string/arm/strncmp.S b/libc/string/arm/strncmp.S
index 6f478b5ed..46f5f5092 100644
--- a/libc/string/arm/strncmp.S
+++ b/libc/string/arm/strncmp.S
@@ -29,16 +29,17 @@
  * by Erik Andersen <andersen@codepoet.org>
  */
-	.text
-	.global strncmp;
-	.type strncmp,%function
-	.align 4; \
+.text
+.global strncmp
+.type strncmp,%function
+.align 4
 strncmp:
-	/* if ((len - 1) < 0) return 0 */
+	/* if (len == 0) return 0 */
+	cmp r2, #0
+	moveq r0, #0
+	moveq pc, lr
 	subs r2, r2, #1
-	movmi r0, #0
-	movmi pc, lr
 	/* ip == last src address to compare */
 	add ip, r0, r2
diff --git a/libc/sysdeps/linux/powerpc/bits/atomic.h b/libc/sysdeps/linux/powerpc/bits/atomic.h
new file mode 100644
index 000000000..447195538
--- /dev/null
+++ b/libc/sysdeps/linux/powerpc/bits/atomic.h
@@ -0,0 +1,607 @@
+/* Atomic operations.  PowerPC Common version.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <bits/wordsize.h>
+
+#if __WORDSIZE == 64
+/* Atomic operations.  PowerPC64 version.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/* The 32-bit exchange_bool is different on powerpc64 because the subf
+   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
+   (load word and zero the high 32 bits) load.
+   On powerpc64 register values are 64-bit by default, including oldval.
+   The value in oldval has an unknown sign extension, while lwarx loads
+   the 32-bit value as unsigned.  So we explicitly clear the high 32 bits
+   in oldval.
*/ +# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \ +({ \ + unsigned int __tmp, __tmp2; \ + __asm __volatile (" clrldi %1,%1,32\n" \ + "1: lwarx %0,0,%2\n" \ + " subf. %0,%1,%0\n" \ + " bne 2f\n" \ + " stwcx. %4,0,%2\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp), "=r" (__tmp2) \ + : "b" (mem), "1" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \ +({ \ + unsigned int __tmp, __tmp2; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + " clrldi %1,%1,32\n" \ + "1: lwarx %0,0,%2\n" \ + " subf. %0,%1,%0\n" \ + " bne 2f\n" \ + " stwcx. %4,0,%2\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp), "=r" (__tmp2) \ + : "b" (mem), "1" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +/* + * Only powerpc64 processors support Load doubleword and reserve index (ldarx) + * and Store doubleword conditional indexed (stdcx) instructions. So here + * we define the 64-bit forms. + */ +# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ +({ \ + unsigned long __tmp; \ + __asm __volatile ( \ + "1: ldarx %0,0,%1\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ +({ \ + unsigned long __tmp; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%1\n" \ + " subf. %0,%2,%0\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (mem), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp != 0; \ +}) + +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ( \ + "1: ldarx %0,0,%1\n" \ + " cmpd %0,%2\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%1\n" \ + " cmpd %0,%2\n" \ + " bne 2f\n" \ + " stdcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +# define __arch_atomic_exchange_64_acq(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%2\n" \ + " stdcx. %3,0,%2\n" \ + " bne- 1b\n" \ + " " __ARCH_ACQ_INSTR \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +# define __arch_atomic_exchange_64_rel(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: ldarx %0,0,%2\n" \ + " stdcx. %3,0,%2\n" \ + " bne- 1b" \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +# define __arch_atomic_exchange_and_add_64(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: ldarx %0,0,%3\n" \ + " add %1,%0,%4\n" \ + " stdcx. 
%1,0,%3\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "r" (value), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+# define __arch_atomic_increment_val_64(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: ldarx %0,0,%2\n" \
+ " addi %0,%0,1\n" \
+ " stdcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+# define __arch_atomic_decrement_val_64(mem) \
+ ({ \
+ __typeof (*(mem)) __val; \
+ __asm __volatile ("1: ldarx %0,0,%2\n" \
+ " subi %0,%0,1\n" \
+ " stdcx. %0,0,%2\n" \
+ " bne- 1b" \
+ : "=&b" (__val), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+# define __arch_atomic_decrement_if_positive_64(mem) \
+ ({ int __val, __tmp; \
+ __asm __volatile ("1: ldarx %0,0,%3\n" \
+ " cmpdi 0,%0,0\n" \
+ " addi %1,%0,-1\n" \
+ " ble 2f\n" \
+ " stdcx. %1,0,%3\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+ : "b" (mem), "m" (*mem) \
+ : "cr0", "memory"); \
+ __val; \
+ })
+
+/*
+ * All powerpc64 processors support the new "light weight" sync (lwsync).
+ */
+# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+/*
+ * "light weight" sync can also be used for the release barrier.
+ */
+# ifndef UP
+# define __ARCH_REL_INSTR "lwsync"
+# endif
+
+#else
+/* Atomic operations.  PowerPC32 version.
+   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+/*
+ * The 32-bit exchange_bool is different on powerpc64 because the subf
+ * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
+ * (a load word and zero (high 32) form).  So powerpc64 has a slightly
+ * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
+ */
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile ( \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " __ARCH_ACQ_INSTR \
+ : "=&r" (__tmp) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
+})
+
+# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+({ \
+ unsigned int __tmp; \
+ __asm __volatile (__ARCH_REL_INSTR "\n" \
+ "1: lwarx %0,0,%1\n" \
+ " subf. %0,%2,%0\n" \
+ " bne 2f\n" \
+ " stwcx. %3,0,%1\n" \
+ " bne- 1b\n" \
+ "2: " \
+ : "=&r" (__tmp) \
+ : "b" (mem), "r" (oldval), "r" (newval) \
+ : "cr0", "memory"); \
+ __tmp != 0; \
+})
+
+/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
+   load and reserve (ldarx) and store conditional (stdcx.) instructions.
+ So for powerpc32 we stub out the 64-bit forms. */ +# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \ + (abort (), 0) + +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \ + (abort (), 0) + +# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \ + (abort (), (__typeof (*mem)) 0) + +# define __arch_atomic_exchange_64_acq(mem, value) \ + ({ abort (); (*mem) = (value); }) + +# define __arch_atomic_exchange_64_rel(mem, value) \ + ({ abort (); (*mem) = (value); }) + +# define __arch_atomic_exchange_and_add_64(mem, value) \ + ({ abort (); (*mem) = (value); }) + +# define __arch_atomic_increment_val_64(mem) \ + ({ abort (); (*mem)++; }) + +# define __arch_atomic_decrement_val_64(mem) \ + ({ abort (); (*mem)--; }) + +# define __arch_atomic_decrement_if_positive_64(mem) \ + ({ abort (); (*mem)--; }) + +/* + * Older powerpc32 processors don't support the new "light weight" + * sync (lwsync). So the only safe option is to use normal sync + * for all powerpc32 applications. + */ +# define atomic_read_barrier() __asm ("sync" ::: "memory") + +#endif + +#include <stdint.h> + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +/* + * Powerpc does not have byte and halfword forms of load and reserve and + * store conditional. So for powerpc we stub out the 8- and 16-bit forms. + */ +#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \ + (abort (), 0) + +#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \ + (abort (), 0) + +#ifdef UP +# define __ARCH_ACQ_INSTR "" +# define __ARCH_REL_INSTR "" +#else +# define __ARCH_ACQ_INSTR "isync" +# ifndef __ARCH_REL_INSTR +# define __ARCH_REL_INSTR "sync" +# endif +#endif + +#define atomic_full_barrier() __asm ("sync" ::: "memory") +#define atomic_write_barrier() __asm ("eieio" ::: "memory") + +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile ( \ + "1: lwarx %0,0,%1\n" \ + " cmpw %0,%2\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __tmp; \ + __typeof (mem) __memp = (mem); \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%1\n" \ + " cmpw %0,%2\n" \ + " bne 2f\n" \ + " stwcx. %3,0,%1\n" \ + " bne- 1b\n" \ + "2: " \ + : "=&r" (__tmp) \ + : "b" (__memp), "r" (oldval), "r" (newval) \ + : "cr0", "memory"); \ + __tmp; \ + }) + +#define __arch_atomic_exchange_32_acq(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile ( \ + "1: lwarx %0,0,%2\n" \ + " stwcx. 
%3,0,%2\n" \ + " bne- 1b\n" \ + " " __ARCH_ACQ_INSTR \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_32_rel(mem, value) \ + ({ \ + __typeof (*mem) __val; \ + __asm __volatile (__ARCH_REL_INSTR "\n" \ + "1: lwarx %0,0,%2\n" \ + " stwcx. %3,0,%2\n" \ + " bne- 1b" \ + : "=&r" (__val), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_exchange_and_add_32(mem, value) \ + ({ \ + __typeof (*mem) __val, __tmp; \ + __asm __volatile ("1: lwarx %0,0,%3\n" \ + " add %1,%0,%4\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b" \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "r" (value), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_increment_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: lwarx %0,0,%2\n" \ + " addi %0,%0,1\n" \ + " stwcx. %0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_val_32(mem) \ + ({ \ + __typeof (*(mem)) __val; \ + __asm __volatile ("1: lwarx %0,0,%2\n" \ + " subi %0,%0,1\n" \ + " stwcx. %0,0,%2\n" \ + " bne- 1b" \ + : "=&b" (__val), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define __arch_atomic_decrement_if_positive_32(mem) \ + ({ int __val, __tmp; \ + __asm __volatile ("1: lwarx %0,0,%3\n" \ + " cmpwi 0,%0,0\n" \ + " addi %1,%0,-1\n" \ + " ble 2f\n" \ + " stwcx. %1,0,%3\n" \ + " bne- 1b\n" \ + "2: " __ARCH_ACQ_INSTR \ + : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \ + : "b" (mem), "m" (*mem) \ + : "cr0", "memory"); \ + __val; \ + }) + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_acq(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_acq (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_acq (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_rel(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_32_rel (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_64_rel (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_exchange_and_add(mem, value) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*mem) == 4) \ + __result = __arch_atomic_exchange_and_add_32 (mem, value); \ + else if (sizeof (*mem) == 8) \ + __result = __arch_atomic_exchange_and_add_64 (mem, value); \ + else \ + abort (); \ + __result; \ + }) + +#define atomic_increment_val(mem) \ + ({ \ + __typeof (*(mem)) __result; \ + if (sizeof (*(mem)) == 4) \ + __result = 
__arch_atomic_increment_val_32 (mem); \
+ else if (sizeof (*(mem)) == 8) \
+ __result = __arch_atomic_increment_val_64 (mem); \
+ else \
+ abort (); \
+ __result; \
+ })
+
+#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })
+
+#define atomic_decrement_val(mem) \
+ ({ \
+ __typeof (*(mem)) __result; \
+ if (sizeof (*(mem)) == 4) \
+ __result = __arch_atomic_decrement_val_32 (mem); \
+ else if (sizeof (*(mem)) == 8) \
+ __result = __arch_atomic_decrement_val_64 (mem); \
+ else \
+ abort (); \
+ __result; \
+ })
+
+#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
+
+
+/* Decrement *MEM if it is > 0, and return the old value. */
+#define atomic_decrement_if_positive(mem) \
+ ({ __typeof (*(mem)) __result; \
+ if (sizeof (*mem) == 4) \
+ __result = __arch_atomic_decrement_if_positive_32 (mem); \
+ else if (sizeof (*mem) == 8) \
+ __result = __arch_atomic_decrement_if_positive_64 (mem); \
+ else \
+ abort (); \
+ __result; \
+ })
diff --git a/libc/sysdeps/linux/powerpc/bits/syscalls.h b/libc/sysdeps/linux/powerpc/bits/syscalls.h
index 6168f3906..75001f218 100644
--- a/libc/sysdeps/linux/powerpc/bits/syscalls.h
+++ b/libc/sysdeps/linux/powerpc/bits/syscalls.h
@@ -10,62 +10,156 @@
  * programs.
  */
 #include <bits/sysnum.h>
+/* Define a macro which expands inline into the wrapper code for a system
+   call.  On failure it hands the error code to __syscall_error (which sets
+   errno); otherwise it returns the result of the system call.
+   On powerpc a system call basically clobbers the same registers as a
+   function call, with the exception of LR (which is needed for the
+   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
+   an error return status).  */
-#define __STRINGIFY(s) __STRINGIFY2 (s)
-#define __STRINGIFY2(s) #s
-
-#undef JUMPTARGET
-#ifdef __PIC__
-#define __MAKE_SYSCALL __STRINGIFY(__uClibc_syscall@plt)
-#else
-#define __MAKE_SYSCALL __STRINGIFY(__uClibc_syscall)
+# undef INLINE_SYSCALL
+#if 0
+# define INLINE_SYSCALL(name, nr, args...) \
+ ({ \
+ INTERNAL_SYSCALL_DECL (sc_err); \
+ long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
+ if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
+ { \
+ __set_errno (INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err)); \
+ sc_ret = -1L; \
+ } \
+ sc_ret; \
+ })
 #endif
-#define unified_syscall_body(name) \
- __asm__ ( \
- ".section \".text\"\n\t" \
- ".align 2\n\t" \
- ".globl " __STRINGIFY(name) "\n\t" \
- ".type " __STRINGIFY(name) ",@function\n\t" \
- #name":\tli 0," __STRINGIFY(__NR_##name) "\n\t" \
- "b " __MAKE_SYSCALL "\n\t" \
- ".size\t" __STRINGIFY(name) ",.""-" __STRINGIFY(name) "\n" \
- )
+# define INLINE_SYSCALL(name, nr, args...) \
+ ({ \
+ INTERNAL_SYSCALL_DECL (sc_err); \
+ long int sc_ret = INTERNAL_SYSCALL (name, sc_err, nr, args); \
+ if (INTERNAL_SYSCALL_ERROR_P (sc_ret, sc_err)) \
+ { \
+ sc_ret = __syscall_error(INTERNAL_SYSCALL_ERRNO (sc_ret, sc_err));\
+ } \
+ sc_ret; \
+ })
+/* Define a macro which expands inline into the wrapper code for a system
+   call.  This use is for internal calls that do not need to handle errors
+   normally.  It will never touch errno.
+   On powerpc a system call basically clobbers the same registers as a
+   function call, with the exception of LR (which is needed for the
+   "sc; bnslr+" sequence) and CR (where only CR0.SO is clobbered to signal
+   an error return status).  */
+
+# undef INTERNAL_SYSCALL_DECL
+# define INTERNAL_SYSCALL_DECL(err) long int err
+
+# undef INTERNAL_SYSCALL
+# define INTERNAL_SYSCALL_NCS(name, err, nr, args...)
\ + ({ \ + register long int r0 __asm__ ("r0"); \ + register long int r3 __asm__ ("r3"); \ + register long int r4 __asm__ ("r4"); \ + register long int r5 __asm__ ("r5"); \ + register long int r6 __asm__ ("r6"); \ + register long int r7 __asm__ ("r7"); \ + register long int r8 __asm__ ("r8"); \ + register long int r9 __asm__ ("r9"); \ + register long int r10 __asm__ ("r10"); \ + register long int r11 __asm__ ("r11"); \ + register long int r12 __asm__ ("r12"); \ + LOADARGS_##nr(name, args); \ + __asm__ __volatile__ \ + ("sc \n\t" \ + "mfcr %0" \ + : "=&r" (r0), \ + "=&r" (r3), "=&r" (r4), "=&r" (r5), "=&r" (r6), "=&r" (r7), \ + "=&r" (r8), "=&r" (r9), "=&r" (r10), "=&r" (r11), "=&r" (r12) \ + : ASM_INPUT_##nr \ + : "cr0", "ctr", "memory"); \ + err = r0; \ + (int) r3; \ + }) +# define INTERNAL_SYSCALL(name, err, nr, args...) \ + INTERNAL_SYSCALL_NCS (__NR_##name, err, nr, ##args) + +# undef INTERNAL_SYSCALL_ERROR_P +# define INTERNAL_SYSCALL_ERROR_P(val, err) \ + ((void) (val), __builtin_expect ((err) & (1 << 28), 0)) + +# undef INTERNAL_SYSCALL_ERRNO +# define INTERNAL_SYSCALL_ERRNO(val, err) (val) + +# define LOADARGS_0(name, dummy) \ + r0 = (long int)name +# define LOADARGS_1(name, __arg1) \ + LOADARGS_0(name, 0); \ + r3 = (long int)__arg1 +# define LOADARGS_2(name, __arg1, __arg2) \ + LOADARGS_1(name, __arg1); \ + r4 = (long int)__arg2 +# define LOADARGS_3(name, __arg1, __arg2, __arg3) \ + LOADARGS_2(name, __arg1, __arg2); \ + r5 = (long int)__arg3 +# define LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4) \ + LOADARGS_3(name, __arg1, __arg2, __arg3); \ + r6 = (long int)__arg4 +# define LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5) \ + LOADARGS_4(name, __arg1, __arg2, __arg3, __arg4); \ + r7 = (long int)__arg5 +# define LOADARGS_6(name, __arg1, __arg2, __arg3, __arg4, __arg5, __arg6) \ + LOADARGS_5(name, __arg1, __arg2, __arg3, __arg4, __arg5); \ + r8 = (long int)__arg6 + +# define ASM_INPUT_0 "0" (r0) +# define ASM_INPUT_1 ASM_INPUT_0, "1" (r3) +# define ASM_INPUT_2 ASM_INPUT_1, "2" (r4) +# define ASM_INPUT_3 ASM_INPUT_2, "3" (r5) +# define ASM_INPUT_4 ASM_INPUT_3, "4" (r6) +# define ASM_INPUT_5 ASM_INPUT_4, "5" (r7) +# define ASM_INPUT_6 ASM_INPUT_5, "6" (r8) #undef _syscall0 -#define _syscall0(type,name) \ -type name(void); \ -unified_syscall_body(name) +#define _syscall0(type,name) \ +type name(void){ \ + return INLINE_SYSCALL(name, 0); \ +} #undef _syscall1 #define _syscall1(type,name,type1,arg1) \ -type name(type1 arg1); \ -unified_syscall_body(name) +type name(type1 arg1){ \ + return INLINE_SYSCALL(name, 1, arg1); \ +} #undef _syscall2 #define _syscall2(type,name,type1,arg1,type2,arg2) \ -type name(type1 arg1, type2 arg2); \ -unified_syscall_body(name) +type name(type1 arg1, type2 arg2){ \ + return INLINE_SYSCALL(name, 2, arg1, arg2); \ +} #undef _syscall3 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \ -type name(type1 arg1, type2 arg2, type3 arg3); \ -unified_syscall_body(name) +type name(type1 arg1, type2 arg2, type3 arg3){ \ + return INLINE_SYSCALL(name, 3, arg1, arg2, arg3); \ +} #undef _syscall4 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4); \ -unified_syscall_body(name) +type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4){ \ + return INLINE_SYSCALL(name, 4, arg1, arg2, arg3, arg4); \ +} #undef _syscall5 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \ -type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 
arg5); \
-unified_syscall_body(name)
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5){ \
+ return INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
+}
 #undef _syscall6
 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6); \
-unified_syscall_body(name)
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6){ \
+ return INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6); \
+}
 #endif /* _BITS_SYSCALLS_H */
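
A note on the strncmp change above: the removed sequence ("subs r2, r2, #1; movmi r0, #0; movmi pc, lr") tested the sign flag of len - 1, i.e. it reinterpreted the unsigned length as signed, so it reported "equal" not only for len == 0 but for any length above 0x80000000. The explicit "cmp r2, #0" avoids that. A minimal C sketch of the same bug (the function names are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* old test: looks at the sign of (len - 1), which reinterprets the
   unsigned length as signed -- misfires once the top bit is set */
static int old_is_empty(uint32_t len)
{
    return (int32_t)(len - 1) < 0;
}

/* new test from the patch: compare the length against zero directly */
static int new_is_empty(uint32_t len)
{
    return len == 0;
}

int main(void)
{
    printf("len 0:          old=%d new=%d\n",
           old_is_empty(0), new_is_empty(0));                     /* 1 1 */
    printf("len 0x80000001: old=%d new=%d\n",
           old_is_empty(0x80000001u), new_is_empty(0x80000001u)); /* 1 0 */
    return 0;
}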
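
Every primitive in the new atomic.h is built on the same load-reserved/store-conditional retry loop: lwarx (ldarx on powerpc64) takes a reservation, stwcx./stdcx. succeeds only if the reservation survived, and bne- retries otherwise; acquire variants end with isync, release variants start with __ARCH_REL_INSTR (sync, or lwsync where available). A stand-alone sketch of the 32-bit acquire compare-and-swap, written as a plain C function instead of the patch's statement-expression macros (the function name is illustrative):

/* Returns the value found in *mem; the swap happened iff the return
   value equals oldval.  PowerPC only; compile with a powerpc gcc. */
static inline unsigned int
cas32_acq(volatile unsigned int *mem, unsigned int oldval, unsigned int newval)
{
    unsigned int prev;
    __asm__ __volatile__(
        "1: lwarx   %0,0,%1\n"  /* load word and take a reservation   */
        "   cmpw    %0,%2\n"    /* does it still hold oldval?         */
        "   bne     2f\n"       /* no: bail out, return what we saw   */
        "   stwcx.  %3,0,%1\n"  /* store only if reservation survived */
        "   bne-    1b\n"       /* reservation lost: retry            */
        "2: isync\n"            /* acquire barrier                    */
        : "=&r" (prev)
        : "b" (mem), "r" (oldval), "r" (newval)
        : "cr0", "memory");
    return prev;
}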
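
The atomic_* front ends dispatch on sizeof (*mem) at compile time, so the untaken branch folds away; on powerpc32 the 8-byte branch deliberately resolves to an abort() stub, since there is no 64-bit ldarx/stdcx. there. A usage sketch, assuming the patched header is reachable (it is an internal uClibc header, and the counter here is hypothetical):

#include <stdint.h>

static int32_t event_count; /* 4 bytes wide: routes to the _32 primitives */

static int32_t count_event(void)
{
    /* returns the value the counter held before the addition */
    return atomic_exchange_and_add(&event_count, 1);
}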
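
On the syscalls.h side, INTERNAL_SYSCALL loads the syscall number into r0 and up to six arguments into r3-r8, issues "sc", and captures the condition register with mfcr; the kernel reports failure through CR0.SO, which is bit 28 of the word mfcr returns, with the errno code left in r3. A sketch of the wrapper a _syscall2() definition now expands to, written out by hand (my_kill is an illustrative name):

#include <sys/syscall.h>

int my_kill(int pid, int sig)
{
    INTERNAL_SYSCALL_DECL(err);                 /* holds the CR copy */
    long ret = INTERNAL_SYSCALL(kill, err, 2, pid, sig);
    if (INTERNAL_SYSCALL_ERROR_P(ret, err))     /* tests CR0.SO      */
        return __syscall_error(INTERNAL_SYSCALL_ERRNO(ret, err));
    return ret;
}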
