Diffstat (limited to 'libc/sysdeps/linux/sparc')
 -rw-r--r--  libc/sysdeps/linux/sparc/Makefile.arch                |   2
 -rw-r--r--  libc/sysdeps/linux/sparc/bits/atomic.h                | 327
 -rw-r--r--  libc/sysdeps/linux/sparc/bits/syscalls.h              | 189
 -rw-r--r--  libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h  |   3
 -rw-r--r--  libc/sysdeps/linux/sparc/clone.S                      |  97
 5 files changed, 519 insertions(+), 99 deletions(-)
diff --git a/libc/sysdeps/linux/sparc/Makefile.arch b/libc/sysdeps/linux/sparc/Makefile.arch
index 4562eaba5..fac37e22c 100644
--- a/libc/sysdeps/linux/sparc/Makefile.arch
+++ b/libc/sysdeps/linux/sparc/Makefile.arch
@@ -8,7 +8,7 @@
CSRC := brk.c __syscall_error.c qp_ops.c
SSRC := \
- __longjmp.S fork.S vfork.S clone.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \
+ __longjmp.S setjmp.S bsd-setjmp.S bsd-_setjmp.S \
syscall.S urem.S udiv.S umul.S sdiv.S rem.S
include $(top_srcdir)libc/sysdeps/linux/Makefile.commonarch
diff --git a/libc/sysdeps/linux/sparc/bits/atomic.h b/libc/sysdeps/linux/sparc/bits/atomic.h
new file mode 100644
index 000000000..ef553f727
--- /dev/null
+++ b/libc/sysdeps/linux/sparc/bits/atomic.h
@@ -0,0 +1,327 @@
+/* Atomic operations. sparc32 version.
+ Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Jakub Jelinek <jakub@redhat.com>, 2003.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _BITS_ATOMIC_H
+#define _BITS_ATOMIC_H 1
+
+#include <stdint.h>
+
+typedef int8_t atomic8_t;
+typedef uint8_t uatomic8_t;
+typedef int_fast8_t atomic_fast8_t;
+typedef uint_fast8_t uatomic_fast8_t;
+
+typedef int16_t atomic16_t;
+typedef uint16_t uatomic16_t;
+typedef int_fast16_t atomic_fast16_t;
+typedef uint_fast16_t uatomic_fast16_t;
+
+typedef int32_t atomic32_t;
+typedef uint32_t uatomic32_t;
+typedef int_fast32_t atomic_fast32_t;
+typedef uint_fast32_t uatomic_fast32_t;
+
+typedef int64_t atomic64_t;
+typedef uint64_t uatomic64_t;
+typedef int_fast64_t atomic_fast64_t;
+typedef uint_fast64_t uatomic_fast64_t;
+
+typedef intptr_t atomicptr_t;
+typedef uintptr_t uatomicptr_t;
+typedef intmax_t atomic_max_t;
+typedef uintmax_t uatomic_max_t;
+
+
+/* We have no compare and swap, just test and set.
+ The following implementation contends on 64 global locks
+ per library and assumes no variable will be accessed using atomic.h
+ macros from two different libraries. */
+
+__make_section_unallocated
+ (".gnu.linkonce.b.__sparc32_atomic_locks, \"aw\", %nobits");
+
+volatile unsigned char __sparc32_atomic_locks[64]
+ __attribute__ ((nocommon, section (".gnu.linkonce.b.__sparc32_atomic_locks"
+ __sec_comment),
+ visibility ("hidden")));
+
+#define __sparc32_atomic_do_lock(addr) \
+ do \
+ { \
+ unsigned int __old_lock; \
+ unsigned int __idx = (((long) addr >> 2) ^ ((long) addr >> 12)) \
+ & 63; \
+ do \
+ __asm __volatile ("ldstub %1, %0" \
+ : "=r" (__old_lock), \
+ "=m" (__sparc32_atomic_locks[__idx]) \
+ : "m" (__sparc32_atomic_locks[__idx]) \
+ : "memory"); \
+ while (__old_lock); \
+ } \
+ while (0)
+
+#define __sparc32_atomic_do_unlock(addr) \
+ do \
+ { \
+ __sparc32_atomic_locks[(((long) addr >> 2) \
+ ^ ((long) addr >> 12)) & 63] = 0; \
+ __asm __volatile ("" ::: "memory"); \
+ } \
+ while (0)
+
+#define __sparc32_atomic_do_lock24(addr) \
+ do \
+ { \
+ unsigned int __old_lock; \
+ do \
+ __asm __volatile ("ldstub %1, %0" \
+ : "=r" (__old_lock), "=m" (*(addr)) \
+ : "m" (*(addr)) \
+ : "memory"); \
+ while (__old_lock); \
+ } \
+ while (0)
+
+#define __sparc32_atomic_do_unlock24(addr) \
+ do \
+ { \
+ *(char *) (addr) = 0; \
+ __asm __volatile ("" ::: "memory"); \
+ } \
+ while (0)
+
+
+#ifndef SHARED
+# define __v9_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+({ \
+ register __typeof (*(mem)) __acev_tmp __asm ("%g6"); \
+ register __typeof (mem) __acev_mem __asm ("%g1") = (mem); \
+ register __typeof (*(mem)) __acev_oldval __asm ("%g5"); \
+ __acev_tmp = (newval); \
+ __acev_oldval = (oldval); \
+ /* .word 0xcde05005 is cas [%g1], %g5, %g6. Can't use cas here though, \
+ because as will then mark the object file as V8+ arch. */ \
+ __asm __volatile (".word 0xcde05005" \
+ : "+r" (__acev_tmp), "=m" (*__acev_mem) \
+ : "r" (__acev_oldval), "m" (*__acev_mem), \
+ "r" (__acev_mem) : "memory"); \
+ __acev_tmp; })
+#endif
+
+/* The only basic operation needed is compare and exchange. */
+#define __v7_compare_and_exchange_val_acq(mem, newval, oldval) \
+ ({ __typeof (mem) __acev_memp = (mem); \
+ __typeof (*mem) __acev_ret; \
+ __typeof (*mem) __acev_newval = (newval); \
+ \
+ __sparc32_atomic_do_lock (__acev_memp); \
+ __acev_ret = *__acev_memp; \
+ if (__acev_ret == (oldval)) \
+ *__acev_memp = __acev_newval; \
+ __sparc32_atomic_do_unlock (__acev_memp); \
+ __acev_ret; })
+
+#define __v7_compare_and_exchange_bool_acq(mem, newval, oldval) \
+ ({ __typeof (mem) __aceb_memp = (mem); \
+ int __aceb_ret; \
+ __typeof (*mem) __aceb_newval = (newval); \
+ \
+ __sparc32_atomic_do_lock (__aceb_memp); \
+ __aceb_ret = 0; \
+ if (*__aceb_memp == (oldval)) \
+ *__aceb_memp = __aceb_newval; \
+ else \
+ __aceb_ret = 1; \
+ __sparc32_atomic_do_unlock (__aceb_memp); \
+ __aceb_ret; })
+
+#define __v7_exchange_acq(mem, newval) \
+ ({ __typeof (mem) __acev_memp = (mem); \
+ __typeof (*mem) __acev_ret; \
+ __typeof (*mem) __acev_newval = (newval); \
+ \
+ __sparc32_atomic_do_lock (__acev_memp); \
+ __acev_ret = *__acev_memp; \
+ *__acev_memp = __acev_newval; \
+ __sparc32_atomic_do_unlock (__acev_memp); \
+ __acev_ret; })
+
+#define __v7_exchange_and_add(mem, value) \
+ ({ __typeof (mem) __acev_memp = (mem); \
+ __typeof (*mem) __acev_ret; \
+ \
+ __sparc32_atomic_do_lock (__acev_memp); \
+ __acev_ret = *__acev_memp; \
+ *__acev_memp = __acev_ret + (value); \
+ __sparc32_atomic_do_unlock (__acev_memp); \
+ __acev_ret; })
+
+/* Special versions, which guarantee that top 8 bits of all values
+ are cleared and use those bits as the ldstub lock. */
+#define __v7_compare_and_exchange_val_24_acq(mem, newval, oldval) \
+ ({ __typeof (mem) __acev_memp = (mem); \
+ __typeof (*mem) __acev_ret; \
+ __typeof (*mem) __acev_newval = (newval); \
+ \
+ __sparc32_atomic_do_lock24 (__acev_memp); \
+ __acev_ret = *__acev_memp & 0xffffff; \
+ if (__acev_ret == (oldval)) \
+ *__acev_memp = __acev_newval; \
+ else \
+ __sparc32_atomic_do_unlock24 (__acev_memp); \
+ __asm __volatile ("" ::: "memory"); \
+ __acev_ret; })
+
+#define __v7_exchange_24_rel(mem, newval) \
+ ({ __typeof (mem) __acev_memp = (mem); \
+ __typeof (*mem) __acev_ret; \
+ __typeof (*mem) __acev_newval = (newval); \
+ \
+ __sparc32_atomic_do_lock24 (__acev_memp); \
+ __acev_ret = *__acev_memp & 0xffffff; \
+ *__acev_memp = __acev_newval; \
+ __asm __volatile ("" ::: "memory"); \
+ __acev_ret; })
+
+#ifdef SHARED
+
+/* When dynamically linked, we assume pre-v9 libraries are only ever
+ used on pre-v9 CPU. */
+# define __atomic_is_v9 0
+
+# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+ __v7_compare_and_exchange_val_acq (mem, newval, oldval)
+
+# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+ __v7_compare_and_exchange_bool_acq (mem, newval, oldval)
+
+# define atomic_exchange_acq(mem, newval) \
+ __v7_exchange_acq (mem, newval)
+
+# define atomic_exchange_and_add(mem, value) \
+ __v7_exchange_and_add (mem, value)
+
+# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
+ ({ \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ __v7_compare_and_exchange_val_24_acq (mem, newval, oldval); })
+
+# define atomic_exchange_24_rel(mem, newval) \
+ ({ \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ __v7_exchange_24_rel (mem, newval); })
+
+#else
+
+/* In libc.a/libpthread.a etc. we don't know if we'll be run on
+ pre-v9 or v9 CPU. To be interoperable with dynamically linked
+ apps on v9 CPUs e.g. with process shared primitives, use cas insn
+ on v9 CPUs and ldstub on pre-v9. */
+
+/* Avoid <ldsodefs.h> include here. */
+extern uint64_t _dl_hwcap __attribute__((weak));
+# define __ATOMIC_HWCAP_SPARC_V9 16
+# define __atomic_is_v9 \
+ (__builtin_expect (&_dl_hwcap != 0, 1) \
+ && __builtin_expect (_dl_hwcap & __ATOMIC_HWCAP_SPARC_V9, \
+ __ATOMIC_HWCAP_SPARC_V9))
+
+# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
+ ({ \
+ __typeof (*mem) __acev_wret; \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ if (__atomic_is_v9) \
+ __acev_wret \
+ = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
+ else \
+ __acev_wret \
+ = __v7_compare_and_exchange_val_acq (mem, newval, oldval); \
+ __acev_wret; })
+
+# define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
+ ({ \
+ int __acev_wret; \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ if (__atomic_is_v9) \
+ { \
+ __typeof (oldval) __acev_woldval = (oldval); \
+ __acev_wret \
+ = __v9_compare_and_exchange_val_32_acq (mem, newval, \
+ __acev_woldval) \
+ != __acev_woldval; \
+ } \
+ else \
+ __acev_wret \
+ = __v7_compare_and_exchange_bool_acq (mem, newval, oldval); \
+ __acev_wret; })
+
+# define atomic_exchange_rel(mem, newval) \
+ ({ \
+ __typeof (*mem) __acev_wret; \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ if (__atomic_is_v9) \
+ { \
+ __typeof (mem) __acev_wmemp = (mem); \
+ __typeof (*(mem)) __acev_wval = (newval); \
+ do \
+ __acev_wret = *__acev_wmemp; \
+ while (__builtin_expect \
+ (__v9_compare_and_exchange_val_32_acq (__acev_wmemp,\
+ __acev_wval, \
+ __acev_wret) \
+ != __acev_wret, 0)); \
+ } \
+ else \
+ __acev_wret = __v7_exchange_acq (mem, newval); \
+ __acev_wret; })
+
+# define atomic_compare_and_exchange_val_24_acq(mem, newval, oldval) \
+ ({ \
+ __typeof (*mem) __acev_wret; \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ if (__atomic_is_v9) \
+ __acev_wret \
+ = __v9_compare_and_exchange_val_32_acq (mem, newval, oldval);\
+ else \
+ __acev_wret \
+ = __v7_compare_and_exchange_val_24_acq (mem, newval, oldval);\
+ __acev_wret; })
+
+# define atomic_exchange_24_rel(mem, newval) \
+ ({ \
+ __typeof (*mem) __acev_w24ret; \
+ if (sizeof (*mem) != 4) \
+ abort (); \
+ if (__atomic_is_v9) \
+ __acev_w24ret = atomic_exchange_rel (mem, newval); \
+ else \
+ __acev_w24ret = __v7_exchange_24_rel (mem, newval); \
+ __acev_w24ret; })
+
+#endif
+
+#endif /* bits/atomic.h */
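The heart of the new atomic.h is the pre-v9 path: sparc32 before V9 has no compare-and-swap, only the atomic ldstub (load-store-unsigned-byte) instruction, so the header hashes each target address onto one of 64 byte-sized spinlocks and performs the compare-and-exchange under that lock. A minimal C sketch of the idea follows; names prefixed sketch_ are illustrative, and the GCC __sync builtins stand in for the header's inline "ldstub" asm.

    static volatile unsigned char sketch_locks[64];

    /* Same hash as __sparc32_atomic_do_lock above: mix bits 2..7 with
       bits 12..17 so nearby words rarely contend on one lock. */
    static unsigned int sketch_lock_index(const void *addr)
    {
        return (((long) addr >> 2) ^ ((long) addr >> 12)) & 63;
    }

    static int sketch_cas_val(int *mem, int newval, int oldval)
    {
        volatile unsigned char *lock = &sketch_locks[sketch_lock_index(mem)];
        int ret;

        /* ldstub atomically loads a byte and stores 0xff into it;
           spinning until the old value was 0 yields a test-and-set
           lock.  The builtin stands in for the inline asm. */
        while (__sync_lock_test_and_set(lock, 0xff))
            ;
        ret = *mem;                   /* read under the lock */
        if (ret == oldval)
            *mem = newval;            /* store only on a match */
        __sync_lock_release(lock);    /* the patch uses a plain byte
                                         store plus a compiler barrier */
        return ret;                   /* caller compares with oldval */
    }

This also explains the header's warning about "two different libraries": the scheme is only atomic for code that agrees on the same lock table, which is why the locks live in a linkonce section.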
diff --git a/libc/sysdeps/linux/sparc/bits/syscalls.h b/libc/sysdeps/linux/sparc/bits/syscalls.h
index fdaa4dee1..4dd161ebf 100644
--- a/libc/sysdeps/linux/sparc/bits/syscalls.h
+++ b/libc/sysdeps/linux/sparc/bits/syscalls.h
@@ -31,6 +31,8 @@
# error unknown __WORDSIZE
#endif
+#define __SYSCALL_CLOBBERS "cc", "memory"
+
#define __SYSCALL_RETURN(type) \
if (__SYSCALL_RES_CHECK) \
return (type) __res; \
@@ -41,75 +43,35 @@
#define _syscall0(type,name) \
type name(void) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res)\
- : "r" (__g1) \
- : "o0", "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,0)); \
}
#undef _syscall1
#define _syscall1(type,name,type1,arg1) \
type name(type1 arg1) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,1,arg1)); \
}
#undef _syscall2
#define _syscall2(type,name,type1,arg1,type2,arg2) \
type name(type1 arg1,type2 arg2) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,2,arg1,arg2)); \
}
#undef _syscall3
#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
type name(type1 arg1,type2 arg2,type3 arg3) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,3,arg1,arg2,arg3)); \
}
#undef _syscall4
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,4,arg1,arg2,arg3,arg4)); \
}
#undef _syscall5
@@ -117,18 +79,7 @@ __SYSCALL_RETURN(type) \
type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,5,arg1,arg2,arg3,arg4,arg5)); \
}
#undef _syscall6
@@ -136,20 +87,118 @@ __SYSCALL_RETURN(type) \
type5,arg5,type6,arg6) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6) \
{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-register long __o5 __asm__ ("o5") = (long)(arg6); \
-__asm__ __volatile__ (__SYSCALL_STRING \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__o5), "r" (__g1) \
- : "cc"); \
-__SYSCALL_RETURN(type) \
+ return (type)(INLINE_SYSCALL(name,6,arg1,arg2,arg3,arg4,arg5,arg6)); \
+}
+
+#ifndef NOT_IN_libc
+#define DEBUG_SYSCALL(name) { \
+ char d[64];\
+ write( 2, d, snprintf( d, 64, "syscall %d error %d\n", __NR_##name, _inline_sys_result)); \
}
+#else
+#define DEBUG_SYSCALL(name) do{} while(0)
+#endif
+
+#undef INLINE_SYSCALL
+#define INLINE_SYSCALL(name, nr, args...) \
+ ({ unsigned int _inline_sys_result = INTERNAL_SYSCALL (name, , nr, args); \
+ if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (_inline_sys_result, ), 0)) \
+ { \
+ __set_errno (INTERNAL_SYSCALL_ERRNO (_inline_sys_result, )); \
+ _inline_sys_result = (unsigned int) -1; \
+ } \
+ (int) _inline_sys_result; })
+
+#undef INTERNAL_SYSCALL_DECL
+#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
+
+
+#define INTERNAL_SYSCALL( name, err, nr, args...) \
+ INTERNAL_SYSCALL_NCS( __NR_##name, err, nr, args )
+
+
+#define INTERNAL_SYSCALL_NCS(sys_num, err, nr, args...) \
+ ({ \
+ unsigned int __res; \
+ { \
+ register long __o0 __asm__("o0"); \
+ register long __g1 __asm__("g1") = sys_num; \
+ LOAD_ARGS_##nr(args) \
+ __asm__ __volatile__( __SYSCALL_STRING \
+ : "=r" (__res), "=&r" (__o0) \
+ : "1" (__o0) ASM_ARGS_##nr, "r" (__g1) \
+ : __SYSCALL_CLOBBERS ); \
+ } \
+ (int)__res; \
+ })
+
+#undef INTERNAL_SYSCALL_ERROR_P
+#define INTERNAL_SYSCALL_ERROR_P(val, err) \
+ ((unsigned int) (val) >= 0xfffff001u)
+
+#undef INTERNAL_SYSCALL_ERRNO
+#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
+
+# define CALL_ERRNO_LOCATION "call __errno_location;"
+#define __CLONE_SYSCALL_STRING \
+ "ta 0x10;" \
+ "bcs 2f;" \
+ " sub %%o1, 1, %%o1;" \
+ "and %%o0, %%o1, %%o0;" \
+ "1:" \
+ ".subsection 2;" \
+ "2:" \
+ "save %%sp, -192, %%sp;" \
+ CALL_ERRNO_LOCATION \
+ " nop;" \
+ "st %%i0, [%%o0];" \
+ "ba 1b;" \
+ " restore %%g0, -1, %%o0;" \
+ ".previous;"
+
+#define INLINE_CLONE_SYSCALL(arg1,arg2,arg3,arg4,arg5) \
+({ \
+ register long __o0 __asm__ ("o0") = (long)(arg1); \
+ register long __o1 __asm__ ("o1") = (long)(arg2); \
+ register long __o2 __asm__ ("o2") = (long)(arg3); \
+ register long __o3 __asm__ ("o3") = (long)(arg4); \
+ register long __o4 __asm__ ("o4") = (long)(arg5); \
+ register long __g1 __asm__ ("g1") = __NR_clone; \
+ __asm __volatile (__CLONE_SYSCALL_STRING : \
+ "=r" (__g1), "=r" (__o0), "=r" (__o1) : \
+ "0" (__g1), "1" (__o0), "2" (__o1), \
+ "r" (__o2), "r" (__o3), "r" (__o4) : \
+ __SYSCALL_CLOBBERS); \
+ __o0; \
+})
+
+#define LOAD_ARGS_0()
+#define ASM_ARGS_0
+#define LOAD_ARGS_1(o0) \
+ __o0 = (int)o0; \
+ LOAD_ARGS_0()
+#define ASM_ARGS_1 ASM_ARGS_0, "r" (__o0)
+#define LOAD_ARGS_2(o0, o1) \
+ register int __o1 __asm__ ("o1") = (int) (o1); \
+ LOAD_ARGS_1 (o0)
+#define ASM_ARGS_2 ASM_ARGS_1, "r" (__o1)
+#define LOAD_ARGS_3(o0, o1, o2) \
+ register int __o2 __asm__ ("o2") = (int) (o2); \
+ LOAD_ARGS_2 (o0, o1)
+#define ASM_ARGS_3 ASM_ARGS_2, "r" (__o2)
+#define LOAD_ARGS_4(o0, o1, o2, o3) \
+ register int __o3 __asm__ ("o3") = (int) (o3); \
+ LOAD_ARGS_3 (o0, o1, o2)
+#define ASM_ARGS_4 ASM_ARGS_3, "r" (__o3)
+#define LOAD_ARGS_5(o0, o1, o2, o3, o4) \
+ register int __o4 __asm__ ("o4") = (int) (o4); \
+ LOAD_ARGS_4 (o0, o1, o2, o3)
+#define ASM_ARGS_5 ASM_ARGS_4, "r" (__o4)
+#define LOAD_ARGS_6(o0, o1, o2, o3, o4, o5) \
+ register int __o5 __asm__ ("o5") = (int) (o5); \
+ LOAD_ARGS_5 (o0, o1, o2, o3, o4)
+#define ASM_ARGS_6 ASM_ARGS_5, "r" (__o5)
+
#endif /* __ASSEMBLER__ */
#endif /* _BITS_SYSCALLS_H */
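The rewrite funnels all the old hand-expanded _syscallN bodies through INLINE_SYSCALL/INTERNAL_SYSCALL_NCS. On Linux/sparc the kernel reports failure in the condition codes (hence the bcs in __CLONE_SYSCALL_STRING), and the __SYSCALL_STRING asm normalizes that into a negated errno in the result register; INTERNAL_SYSCALL_ERROR_P then only needs a range check, since -4095..-1 seen as unsigned is 0xfffff001..0xffffffff. A plain-C sketch of what INLINE_SYSCALL's tail does with that normalized raw value (sketch_finish_syscall is an illustrative helper, not from the patch):

    #include <errno.h>

    static inline int sketch_finish_syscall(unsigned int raw)
    {
        /* raw is either the real result or -errno; 0xfffff001u is
           (unsigned) -4095, the most negative errno returned. */
        if (raw >= 0xfffff001u) {     /* INTERNAL_SYSCALL_ERROR_P */
            errno = -(int) raw;       /* INTERNAL_SYSCALL_ERRNO */
            return -1;                /* what INLINE_SYSCALL callers see */
        }
        return (int) raw;             /* success: the kernel's result */
    }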
diff --git a/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h b/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h
index 41d3e7c3d..b097316b0 100644
--- a/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h
+++ b/libc/sysdeps/linux/sparc/bits/uClibc_arch_features.h
@@ -35,4 +35,7 @@
/* define if target supports IEEE signed zero floats */
#define __UCLIBC_HAVE_SIGNED_ZERO__
+/* define if target supports CFI pseudo ops */
+#define __UCLIBC_HAVE_ASM_CFI_DIRECTIVES__
+
#endif /* _BITS_UCLIBC_ARCH_FEATURES_H */
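This new feature flag is what allows the assembly below to emit DWARF unwind info (the cfi_def_cfa_register, cfi_window_save, and cfi_register calls in clone.S). For reference, a minimal hand-written function showing roughly what those macros expand to under GNU as; sketch_fn is illustrative only:

            .text
            .globl  sketch_fn
            .type   sketch_fn, @function
    sketch_fn:
            .cfi_startproc
            save    %sp, -96, %sp           ! new window, minimal frame
            .cfi_def_cfa_register %fp       ! CFA now computed from %fp
            .cfi_window_save                ! caller regs are in the window
            .cfi_register %o7, %i7          ! return address moved by save
            ret
            restore
            .cfi_endproc
            .size   sketch_fn, .-sketch_fn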
diff --git a/libc/sysdeps/linux/sparc/clone.S b/libc/sysdeps/linux/sparc/clone.S
index 0e41ee0cb..b623bfb8d 100644
--- a/libc/sysdeps/linux/sparc/clone.S
+++ b/libc/sysdeps/linux/sparc/clone.S
@@ -1,4 +1,5 @@
-/* Copyright (C) 1996, 1997, 1998, 2000 Free Software Foundation, Inc.
+/* Copyright (C) 1996, 1997, 1998, 2000, 2003, 2004, 2007
+ Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson (rth@tamu.edu).
@@ -20,47 +21,87 @@
/* clone() is even more special than fork() as it mucks with stacks
and invokes a function in the right context after its all over. */
-#include <features.h>
+#include <asm/errno.h>
#include <asm/unistd.h>
+#include <tcb-offsets.h>
+#include <sysdep.h>
-/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg); */
+#define CLONE_VM 0x00000100
+#define CLONE_THREAD 0x00010000
-.text
-.global clone
-.type clone,%function
-.align 4
+/* int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg,
+ pid_t *ptid, void *tls, pid_t *ctid); */
-clone:
+ .text
+ENTRY (__clone)
save %sp,-96,%sp
+ cfi_def_cfa_register(%fp)
+ cfi_window_save
+ cfi_register(%o7, %i7)
/* sanity check arguments */
- tst %i0
- be __error
- orcc %i1,%g0,%o1
- be __error
- mov %i2,%o0
+ orcc %i0,%g0,%g2
+ be .Leinval
+ orcc %i1,%g0,%o1
+ be .Leinval
+ mov %i2,%o0
+
+ /* The child_stack is the top of the stack, allocate one
+ whole stack frame from that as this is what the kernel
+ expects. */
+ sub %o1, 96, %o1
+ mov %i3, %g3
+ mov %i2, %g4
+
+ /* ptid */
+ mov %i4,%o2
+ /* tls */
+ mov %i5,%o3
+ /* ctid */
+ ld [%fp+92],%o4
/* Do the system call */
set __NR_clone,%g1
ta 0x10
- bcs __error
- tst %o1
+ bcs .Lerror
+ tst %o1
bne __thread_start
- nop
- ret
- restore %o0,%g0,%o0
-
-__error:
- jmp __syscall_error
+ nop
+ jmpl %i7 + 8, %g0
+ restore %o0,%g0,%o0
-.size clone,.-clone
-
-.type __thread_start,%function
+.Leinval:
+ mov EINVAL, %o0
+.Lerror:
+ call HIDDEN_JUMPTARGET(__errno_location)
+ mov %o0, %i0
+ st %i0,[%o0]
+ jmpl %i7 + 8, %g0
+ restore %g0,-1,%o0
+END(__clone)
+ .type __thread_start,@function
__thread_start:
- call %i0
- mov %i3,%o0
+#ifdef RESET_PID
+ sethi %hi(CLONE_THREAD), %l0
+ andcc %g4, %l0, %g0
+ bne 1f
+ andcc %g4, CLONE_VM, %g0
+ bne,a 2f
+ mov -1,%o0
+ set __NR_getpid,%g1
+ ta 0x10
+2:
+ st %o0,[%g7 + PID]
+ st %o0,[%g7 + TID]
+1:
+#endif
+ mov %g0, %fp /* terminate backtrace */
+ call %g2
+ mov %g3,%o0
call HIDDEN_JUMPTARGET(_exit),0
- nop
+ nop
+
+ .size __thread_start, .-__thread_start
-.size __thread_start,.-__thread_start
+weak_alias (__clone, clone)
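With the new entry point, sparc clone matches the kernel's extended signature, int clone(int (*fn)(void *), void *child_stack, int flags, void *arg, pid_t *ptid, void *tls, pid_t *ctid), storing EINVAL or the kernel's errno through __errno_location on failure and routing the child through __thread_start. A hedged usage sketch (ordinary CLONE_VM | SIGCHLD use, not part of the patch; child_fn and STACK_SIZE are illustrative). Callers pass the top of the stack, and clone.S itself reserves the 96-byte register-window frame, as the "sub %o1, 96, %o1" above shows:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
        printf("child running with arg \"%s\"\n", (const char *) arg);
        return 0;
    }

    int main(void)
    {
        enum { STACK_SIZE = 64 * 1024 };
        char *stack = malloc(STACK_SIZE);
        if (stack == NULL)
            return 1;

        /* Stacks grow down on sparc, so hand clone() the top of the
           allocation; the assembly carves out the register-window
           save area the kernel expects. */
        int pid = clone(child_fn, stack + STACK_SIZE,
                        CLONE_VM | SIGCHLD, (void *) "hello");
        if (pid == -1)
            return 1;

        waitpid(pid, NULL, 0);    /* SIGCHLD makes the child waitable */
        free(stack);
        return 0;
    }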