Diffstat (limited to 'libpthread/linuxthreads.old')
-rw-r--r--  libpthread/linuxthreads.old/cancel.c                   |  4
-rw-r--r--  libpthread/linuxthreads.old/debug.h                    | 18
-rw-r--r--  libpthread/linuxthreads.old/internals.h                | 10
-rw-r--r--  libpthread/linuxthreads.old/join.c                     |  6
-rw-r--r--  libpthread/linuxthreads.old/manager.c                  | 26
-rw-r--r--  libpthread/linuxthreads.old/oldsemaphore.c             |  8
-rw-r--r--  libpthread/linuxthreads.old/ptfork.c                   |  2
-rw-r--r--  libpthread/linuxthreads.old/pthread.c                  | 10
-rw-r--r--  libpthread/linuxthreads.old/queue.h                    |  8
-rw-r--r--  libpthread/linuxthreads.old/restart.h                  |  6
-rw-r--r--  libpthread/linuxthreads.old/rwlock.c                   | 12
-rw-r--r--  libpthread/linuxthreads.old/spinlock.c                 |  8
-rw-r--r--  libpthread/linuxthreads.old/spinlock.h                 | 22
-rw-r--r--  libpthread/linuxthreads.old/sysdeps/mips/pt-machine.h  |  2
-rw-r--r--  libpthread/linuxthreads.old/sysdeps/sh64/pt-machine.c  |  2
15 files changed, 72 insertions, 72 deletions
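
The hunks below all follow one pattern: `static inline` helpers become `static __inline__`, `__asm __volatile` becomes `__asm__ __volatile__`, and trailing whitespace is stripped. Presumably the double-underscore spellings are used because they are GCC's alternative keywords, which remain available even under strict `-ansi`/`-std=c89` settings where plain `inline` and `asm` are not recognized. A minimal standalone sketch of that idea follows; the `example_*` names and the compile command are illustrative only and are not taken from this tree.

/*
 * Sketch of the keyword substitution applied throughout this diff.
 * The helpers below are illustrative, not functions from linuxthreads.old.
 * This file compiles even with:  gcc -ansi -c example.c
 * because __inline__ and __asm__ work in strict C89 mode.
 */

/* Before the change this would read "static inline int ...",
 * which -ansi rejects because `inline` is not a C89 keyword. */
static __inline__ int example_queue_is_empty(void * const * q)
{
    return *q == (void *) 0;
}

/* The same compiler barrier spelled __asm__ __volatile__ instead of
 * the pre-change __asm __volatile, mirroring __pthread_release()
 * in spinlock.c. */
static __inline__ void example_release(int * spinlock)
{
    *spinlock = 0;
    __asm__ __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock));
}

int main(void)
{
    void *q = (void *) 0;
    int lock = 1;
    int empty = example_queue_is_empty(&q);
    example_release(&lock);
    return empty ? 0 : 1;
}
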
diff --git a/libpthread/linuxthreads.old/cancel.c b/libpthread/linuxthreads.old/cancel.c
index 79409675f..239b821f1 100644
--- a/libpthread/linuxthreads.old/cancel.c
+++ b/libpthread/linuxthreads.old/cancel.c
@@ -101,7 +101,7 @@ int pthread_cancel(pthread_t thread)
/* If the thread has registered an extrication interface, then
invoke the interface. If it returns 1, then we succeeded in
dequeuing the thread from whatever waiting object it was enqueued
- with. In that case, it is our responsibility to wake it up.
+ with. In that case, it is our responsibility to wake it up.
And also to set the p_woken_by_cancel flag so the woken thread
can tell that it was woken by cancellation. */
@@ -122,7 +122,7 @@ int pthread_cancel(pthread_t thread)
if (dorestart)
restart(th);
- else
+ else
kill(pid, __pthread_sig_cancel);
return 0;
diff --git a/libpthread/linuxthreads.old/debug.h b/libpthread/linuxthreads.old/debug.h
index 94b7c084b..76779dad9 100644
--- a/libpthread/linuxthreads.old/debug.h
+++ b/libpthread/linuxthreads.old/debug.h
@@ -8,15 +8,15 @@
** debugging on, add -DDEBUG_PT to CFLAGS. It was added to the original
** distribution of linuxthreads.
**
-** This program is free software; you can redistribute it and/or
-** modify it under the terms of the GNU Library General Public License
-** as published by the Free Software Foundation; either version 2
-** of the License, or (at your option) any later version.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-** GNU Library General Public License for more details.
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU Library General Public License
+** as published by the Free Software Foundation; either version 2
+** of the License, or (at your option) any later version.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU Library General Public License for more details.
**
****************************************************************************/
diff --git a/libpthread/linuxthreads.old/internals.h b/libpthread/linuxthreads.old/internals.h
index ab227d6cc..38290a5fe 100644
--- a/libpthread/linuxthreads.old/internals.h
+++ b/libpthread/linuxthreads.old/internals.h
@@ -34,7 +34,7 @@
#include <bits/uClibc_locale.h>
#endif /* __UCLIBC_HAS_XLOCALE__ */
-/* Use a funky version in a probably vein attempt at preventing gdb
+/* Use a funky version in a probably vein attempt at preventing gdb
* from dlopen()'ing glibc's libthread_db library... */
#define STRINGIFY(s) STRINGIFY2 (s)
#define STRINGIFY2(s) #s
@@ -305,14 +305,14 @@ extern volatile pthread_descr __pthread_last_event;
/* Return the handle corresponding to a thread id */
-static inline pthread_handle thread_handle(pthread_t id)
+static __inline__ pthread_handle thread_handle(pthread_t id)
{
return &__pthread_handles[id % PTHREAD_THREADS_MAX];
}
/* Validate a thread handle. Must have acquired h->h_spinlock before. */
-static inline int invalid_handle(pthread_handle h, pthread_t id)
+static __inline__ int invalid_handle(pthread_handle h, pthread_t id)
{
return h->h_descr == NULL || h->h_descr->p_tid != id;
}
@@ -381,8 +381,8 @@ extern size_t __pagesize;
extern pthread_descr __pthread_find_self (void) __attribute__ ((const));
-static inline pthread_descr thread_self (void) __attribute__ ((const));
-static inline pthread_descr thread_self (void)
+static __inline__ pthread_descr thread_self (void) __attribute__ ((const));
+static __inline__ pthread_descr thread_self (void)
{
#ifdef THREAD_SELF
return THREAD_SELF;
diff --git a/libpthread/linuxthreads.old/join.c b/libpthread/linuxthreads.old/join.c
index 250b47166..a46d0b68d 100644
--- a/libpthread/linuxthreads.old/join.c
+++ b/libpthread/linuxthreads.old/join.c
@@ -151,7 +151,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
/* If not terminated yet, suspend ourselves. */
if (! th->p_terminated) {
/* Register extrication interface */
- __pthread_set_own_extricate_if(self, &extr);
+ __pthread_set_own_extricate_if(self, &extr);
if (!(THREAD_GETMEM(self, p_canceled)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
th->p_joining = self;
@@ -160,7 +160,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
__pthread_unlock(&handle->h_lock);
if (already_canceled) {
- __pthread_set_own_extricate_if(self, 0);
+ __pthread_set_own_extricate_if(self, 0);
__pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
}
@@ -168,7 +168,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
suspend(self);
PDEBUG("after suspend\n");
/* Deregister extrication interface */
- __pthread_set_own_extricate_if(self, 0);
+ __pthread_set_own_extricate_if(self, 0);
/* This is a cancellation point */
if (THREAD_GETMEM(self, p_woken_by_cancel)
diff --git a/libpthread/linuxthreads.old/manager.c b/libpthread/linuxthreads.old/manager.c
index fc39b1e25..69fdd1cb5 100644
--- a/libpthread/linuxthreads.old/manager.c
+++ b/libpthread/linuxthreads.old/manager.c
@@ -76,7 +76,7 @@ volatile pthread_descr __pthread_last_event;
/* Stack segment numbers are also indices into the __pthread_handles array. */
/* Stack segment number 0 is reserved for the initial thread. */
-static inline pthread_descr thread_segment(int seg)
+static __inline__ pthread_descr thread_segment(int seg)
{
return (pthread_descr)(THREAD_STACK_START_ADDRESS - (seg - 1) * STACK_SIZE)
- 1;
@@ -206,7 +206,7 @@ int attribute_noreturn __pthread_manager(void *arg)
pthread_handle_free(request.req_args.free.thread_id);
break;
case REQ_PROCESS_EXIT:
- PDEBUG("got REQ_PROCESS_EXIT from %d, exit code = %d\n",
+ PDEBUG("got REQ_PROCESS_EXIT from %d, exit code = %d\n",
request.req_thread, request.req_args.exit.code);
pthread_handle_exit(request.req_thread,
request.req_args.exit.code);
@@ -414,7 +414,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
{
stacksize = attr->__stacksize;
}
-
+
/* malloc a stack - memory from the bottom up */
if ((new_thread_bottom = malloc(stacksize)) == NULL)
{
@@ -430,7 +430,7 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
*
* ^ +------------------------+
* | | pthread_descr struct |
- * | +------------------------+ <- new_thread
+ * | +------------------------+ <- new_thread
* malloc block | | |
* | | thread stack |
* | | |
@@ -443,18 +443,18 @@ static int pthread_allocate_stack(const pthread_attr_t *attr,
new_thread = ((pthread_descr) ((int)(new_thread_bottom + stacksize) & -sizeof(void*))) - 1;
guardaddr = NULL;
guardsize = 0;
-
+
PDEBUG("thread stack: bos=%p, tos=%p\n", new_thread_bottom, new_thread);
-
+
/* check the initial thread stack boundaries so they don't overlap */
NOMMU_INITIAL_THREAD_BOUNDS((char *) new_thread, (char *) new_thread_bottom);
-
- PDEBUG("initial stack: bos=%p, tos=%p\n", __pthread_initial_thread_bos,
+
+ PDEBUG("initial stack: bos=%p, tos=%p\n", __pthread_initial_thread_bos,
__pthread_initial_thread_tos);
-
+
/* on non-MMU systems we always have non-standard stack frames */
__pthread_nonstandard_stacks = 1;
-
+
#endif /* __ARCH_USE_MMU__ */
}
@@ -567,7 +567,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
/* ******************************************************** */
/* This code was moved from below to cope with running threads
* on uClinux systems. See comment below...
- * Insert new thread in doubly linked list of active threads */
+ * Insert new thread in doubly linked list of active threads */
new_thread->p_prevlive = __pthread_main_thread;
new_thread->p_nextlive = __pthread_main_thread->p_nextlive;
__pthread_main_thread->p_nextlive->p_prevlive = new_thread;
@@ -640,9 +640,9 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
}
/* Check if cloning succeeded */
if (pid == -1) {
- /********************************************************
+ /********************************************************
* Code inserted to remove the thread from our list of active
- * threads in case of failure (needed to cope with uClinux),
+ * threads in case of failure (needed to cope with uClinux),
* See comment below. */
new_thread->p_nextlive->p_prevlive = new_thread->p_prevlive;
new_thread->p_prevlive->p_nextlive = new_thread->p_nextlive;
diff --git a/libpthread/linuxthreads.old/oldsemaphore.c b/libpthread/linuxthreads.old/oldsemaphore.c
index 178affa1b..2a7b40acc 100644
--- a/libpthread/linuxthreads.old/oldsemaphore.c
+++ b/libpthread/linuxthreads.old/oldsemaphore.c
@@ -35,7 +35,7 @@ typedef struct {
/* Maximum value the semaphore can have. */
#define SEM_VALUE_MAX ((int) ((~0u) >> 1))
-static inline int sem_compare_and_swap(old_sem_t *sem, long oldval, long newval)
+static __inline__ int sem_compare_and_swap(old_sem_t *sem, long oldval, long newval)
{
return compare_and_swap(&sem->sem_status, oldval, newval, &sem->sem_spinlock);
}
@@ -92,7 +92,7 @@ int __old_sem_wait(old_sem_t * sem)
while (1) {
/* Register extrication interface */
- __pthread_set_own_extricate_if(self, &extr);
+ __pthread_set_own_extricate_if(self, &extr);
do {
oldstatus = sem->sem_status;
if ((oldstatus & 1) && (oldstatus != 1))
@@ -105,12 +105,12 @@ int __old_sem_wait(old_sem_t * sem)
while (! sem_compare_and_swap(sem, oldstatus, newstatus));
if (newstatus & 1) {
/* We got the semaphore. */
- __pthread_set_own_extricate_if(self, 0);
+ __pthread_set_own_extricate_if(self, 0);
return 0;
}
/* Wait for sem_post or cancellation */
suspend(self);
- __pthread_set_own_extricate_if(self, 0);
+ __pthread_set_own_extricate_if(self, 0);
/* This is a cancellation point */
if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
diff --git a/libpthread/linuxthreads.old/ptfork.c b/libpthread/linuxthreads.old/ptfork.c
index 184508a3d..c34ea8104 100644
--- a/libpthread/linuxthreads.old/ptfork.c
+++ b/libpthread/linuxthreads.old/ptfork.c
@@ -73,7 +73,7 @@ int pthread_atfork(void (*prepare)(void),
}
//strong_alias (__pthread_atfork, pthread_atfork)
-static inline void pthread_call_handlers(struct handler_list * list)
+static __inline__ void pthread_call_handlers(struct handler_list * list)
{
for (/*nothing*/; list != NULL; list = list->next) (list->handler)();
}
diff --git a/libpthread/linuxthreads.old/pthread.c b/libpthread/linuxthreads.old/pthread.c
index 53b531465..35de4b731 100644
--- a/libpthread/linuxthreads.old/pthread.c
+++ b/libpthread/linuxthreads.old/pthread.c
@@ -386,7 +386,7 @@ static int *__libc_multiple_threads_ptr;
startup of the C library. */
void __pthread_initialize_minimal(void)
{
- /* If we have special thread_self processing, initialize
+ /* If we have special thread_self processing, initialize
* that for the main thread now. */
#ifdef INIT_THREAD_SELF
INIT_THREAD_SELF(&__pthread_initial_thread, 0);
@@ -434,7 +434,7 @@ static void pthread_initialize(void)
{ /* uClibc-specific stdio initialization for threads. */
FILE *fp;
-
+
_stdio_user_locking = 0; /* 2 if threading not initialized */
for (fp = _stdio_openlist; fp != NULL; fp = fp->__nextopen) {
if (fp->__user_locking != 1) {
@@ -458,7 +458,7 @@ static void pthread_initialize(void)
}
#else
/* For non-MMU assume __pthread_initial_thread_tos at upper page boundary, and
- * __pthread_initial_thread_bos at address 0. These bounds are refined as we
+ * __pthread_initial_thread_bos at address 0. These bounds are refined as we
* malloc other stack frames such that they don't overlap. -StS
*/
__pthread_initial_thread_tos =
@@ -674,7 +674,7 @@ pthread_t pthread_self(void)
return THREAD_GETMEM(self, p_tid);
}
libpthread_hidden_def (pthread_self)
-
+
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
return thread1 == thread2;
@@ -820,7 +820,7 @@ static void pthread_handle_sigcancel(int sig)
{
pthread_descr self = thread_self();
sigjmp_buf * jmpbuf;
-
+
if (self == &__pthread_manager_thread)
{
diff --git a/libpthread/linuxthreads.old/queue.h b/libpthread/linuxthreads.old/queue.h
index c7f8471b9..01d18d16e 100644
--- a/libpthread/linuxthreads.old/queue.h
+++ b/libpthread/linuxthreads.old/queue.h
@@ -18,7 +18,7 @@
linked through their p_nextwaiting field. The lists are kept
sorted by decreasing priority, and then decreasing waiting time. */
-static inline void enqueue(pthread_descr * q, pthread_descr th)
+static __inline__ void enqueue(pthread_descr * q, pthread_descr th)
{
int prio = th->p_priority;
for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
@@ -31,7 +31,7 @@ static inline void enqueue(pthread_descr * q, pthread_descr th)
*q = th;
}
-static inline pthread_descr dequeue(pthread_descr * q)
+static __inline__ pthread_descr dequeue(pthread_descr * q)
{
pthread_descr th;
th = *q;
@@ -42,7 +42,7 @@ static inline pthread_descr dequeue(pthread_descr * q)
return th;
}
-static inline int remove_from_queue(pthread_descr * q, pthread_descr th)
+static __inline__ int remove_from_queue(pthread_descr * q, pthread_descr th)
{
for (; *q != NULL; q = &((*q)->p_nextwaiting)) {
if (*q == th) {
@@ -54,7 +54,7 @@ static inline int remove_from_queue(pthread_descr * q, pthread_descr th)
return 0;
}
-static inline int queue_is_empty(pthread_descr * q)
+static __inline__ int queue_is_empty(pthread_descr * q)
{
return *q == NULL;
}
diff --git a/libpthread/linuxthreads.old/restart.h b/libpthread/linuxthreads.old/restart.h
index 687d92fae..7d63a7022 100644
--- a/libpthread/linuxthreads.old/restart.h
+++ b/libpthread/linuxthreads.old/restart.h
@@ -18,7 +18,7 @@
/* Primitives for controlling thread execution */
-static inline void restart(pthread_descr th)
+static __inline__ void restart(pthread_descr th)
{
/* See pthread.c */
#if __ASSUME_REALTIME_SIGNALS
@@ -28,7 +28,7 @@ static inline void restart(pthread_descr th)
#endif
}
-static inline void suspend(pthread_descr self)
+static __inline__ void suspend(pthread_descr self)
{
/* See pthread.c */
#if __ASSUME_REALTIME_SIGNALS
@@ -38,7 +38,7 @@ static inline void suspend(pthread_descr self)
#endif
}
-static inline int timedsuspend(pthread_descr self,
+static __inline__ int timedsuspend(pthread_descr self,
const struct timespec *abstime)
{
/* See pthread.c */
diff --git a/libpthread/linuxthreads.old/rwlock.c b/libpthread/linuxthreads.old/rwlock.c
index e25ab3ea0..eaf71e77c 100644
--- a/libpthread/linuxthreads.old/rwlock.c
+++ b/libpthread/linuxthreads.old/rwlock.c
@@ -60,7 +60,7 @@ rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
pthread_readlock_info *info = self->p_readlock_free;
- if (info != NULL)
+ if (info != NULL)
self->p_readlock_free = info->pr_next;
else
info = malloc(sizeof *info);
@@ -100,7 +100,7 @@ rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
return info;
}
}
-
+
return NULL;
}
@@ -137,7 +137,7 @@ rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
* This function helps support brain-damaged recursive read locking
* semantics required by Unix 98, while maintaining write priority.
* This basically determines whether this thread already holds a read lock
- * already. It returns 1 if so, otherwise it returns 0.
+ * already. It returns 1 if so, otherwise it returns 0.
*
* If the thread has any ``untracked read locks'' then it just assumes
* that this lock is among them, just to be safe, and returns 1.
@@ -259,7 +259,7 @@ pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
else
self->p_untracked_readlock_count++;
}
-
+
return 0;
}
@@ -277,7 +277,7 @@ pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
__pthread_lock (&rwlock->__rw_lock, self);
/* 0 is passed to here instead of have_lock_already.
- This is to meet Single Unix Spec requirements:
+ This is to meet Single Unix Spec requirements:
if writers are waiting, pthread_rwlock_tryrdlock
does not acquire a read lock, even if the caller has
one or more read locks already. */
@@ -300,7 +300,7 @@ pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
self->p_untracked_readlock_count++;
}
}
-
+
return retval;
}
diff --git a/libpthread/linuxthreads.old/spinlock.c b/libpthread/linuxthreads.old/spinlock.c
index f5999e4d2..24c81d47e 100644
--- a/libpthread/linuxthreads.old/spinlock.c
+++ b/libpthread/linuxthreads.old/spinlock.c
@@ -30,11 +30,11 @@ libpthread_hidden_proto(nanosleep)
static void __pthread_acquire(int * spinlock);
-static inline void __pthread_release(int * spinlock)
+static __inline__ void __pthread_release(int * spinlock)
{
WRITE_MEMORY_BARRIER();
*spinlock = __LT_SPINLOCK_INIT;
- __asm __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock));
+ __asm__ __volatile__ ("" : "=m" (*spinlock) : "m" (*spinlock));
}
@@ -110,12 +110,12 @@ void internal_function __pthread_lock(struct _pthread_fastlock * lock,
#ifdef BUSY_WAIT_NOP
BUSY_WAIT_NOP;
#endif
- __asm __volatile ("" : "=m" (lock->__status) : "m" (lock->__status));
+ __asm__ __volatile__ ("" : "=m" (lock->__status) : "m" (lock->__status));
}
lock->__spinlock += (spin_count - lock->__spinlock) / 8;
}
-#endif
+#endif
again:
diff --git a/libpthread/linuxthreads.old/spinlock.h b/libpthread/linuxthreads.old/spinlock.h
index 0ec40c57c..7117898f7 100644
--- a/libpthread/linuxthreads.old/spinlock.h
+++ b/libpthread/linuxthreads.old/spinlock.h
@@ -39,7 +39,7 @@ extern int __pthread_has_cas;
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock);
-static inline int compare_and_swap(long * ptr, long oldval, long newval,
+static __inline__ int compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock)
{
if (__builtin_expect (__pthread_has_cas, 1))
@@ -56,7 +56,7 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
#ifdef HAS_COMPARE_AND_SWAP_WITH_RELEASE_SEMANTICS
-static inline int
+static __inline__ int
compare_and_swap_with_release_semantics (long * ptr, long oldval,
long newval, int * spinlock)
{
@@ -66,7 +66,7 @@ compare_and_swap_with_release_semantics (long * ptr, long oldval,
#endif
-static inline int compare_and_swap(long * ptr, long oldval, long newval,
+static __inline__ int compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock)
{
return __compare_and_swap(ptr, oldval, newval);
@@ -77,7 +77,7 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,
extern int __pthread_compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock);
-static inline int compare_and_swap(long * ptr, long oldval, long newval,
+static __inline__ int compare_and_swap(long * ptr, long oldval, long newval,
int * spinlock)
{
return __pthread_compare_and_swap(ptr, oldval, newval, spinlock);
@@ -96,13 +96,13 @@ extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self);
extern int __pthread_unlock(struct _pthread_fastlock *lock);
-static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
+static __inline__ void __pthread_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
lock->__spinlock = __LT_SPINLOCK_INIT;
}
-static inline int __pthread_trylock (struct _pthread_fastlock * lock)
+static __inline__ int __pthread_trylock (struct _pthread_fastlock * lock)
{
#if defined TEST_FOR_COMPARE_AND_SWAP
if (!__pthread_has_cas)
@@ -133,13 +133,13 @@ extern int __pthread_alt_timedlock(struct _pthread_fastlock * lock,
extern void __pthread_alt_unlock(struct _pthread_fastlock *lock);
-static inline void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
+static __inline__ void __pthread_alt_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
lock->__spinlock = __LT_SPINLOCK_INIT;
}
-static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
+static __inline__ int __pthread_alt_trylock (struct _pthread_fastlock * lock)
{
#if defined TEST_FOR_COMPARE_AND_SWAP
if (!__pthread_has_cas)
@@ -172,7 +172,7 @@ static inline int __pthread_alt_trylock (struct _pthread_fastlock * lock)
/* Operations on pthread_atomic, which is defined in internals.h */
-static inline long atomic_increment(struct pthread_atomic *pa)
+static __inline__ long atomic_increment(struct pthread_atomic *pa)
{
long oldval;
@@ -184,7 +184,7 @@ static inline long atomic_increment(struct pthread_atomic *pa)
}
-static inline long atomic_decrement(struct pthread_atomic *pa)
+static __inline__ long atomic_decrement(struct pthread_atomic *pa)
{
long oldval;
@@ -196,7 +196,7 @@ static inline long atomic_decrement(struct pthread_atomic *pa)
}
-static inline void
+static __inline__ void
__pthread_set_own_extricate_if (pthread_descr self, pthread_extricate_if *peif)
{
/* Only store a non-null peif if the thread has cancellation enabled.
diff --git a/libpthread/linuxthreads.old/sysdeps/mips/pt-machine.h b/libpthread/linuxthreads.old/sysdeps/mips/pt-machine.h
index 638952846..fb1cc0e6d 100644
--- a/libpthread/linuxthreads.old/sysdeps/mips/pt-machine.h
+++ b/libpthread/linuxthreads.old/sysdeps/mips/pt-machine.h
@@ -33,7 +33,7 @@
/* Copyright (C) 2000, 2002 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Maciej W. Rozycki <macro@ds2.pg.gda.pl>, 2000. */
-static inline int
+static __inline__ int
__NTH (_test_and_set (int *p, int v))
{
int r, t;
diff --git a/libpthread/linuxthreads.old/sysdeps/sh64/pt-machine.c b/libpthread/linuxthreads.old/sysdeps/sh64/pt-machine.c
index ea4881322..bd2c401fc 100644
--- a/libpthread/linuxthreads.old/sysdeps/sh64/pt-machine.c
+++ b/libpthread/linuxthreads.old/sysdeps/sh64/pt-machine.c
@@ -26,7 +26,7 @@
/* Spinlock implementation; required. */
-/* The SH5 does not have a suitable test-and-set instruction (SWAP only
+/* The SH5 does not have a suitable test-and-set instruction (SWAP only
operates on an aligned quad word). So we use the SH4 version instead.
This must be seperately compiled in SHcompact mode, so it cannot be
inline. */