 include/link.h                             |  11
 ldso/include/dl-hash.h                     |  56
 ldso/include/ldso.h                        |   4
 ldso/include/ldsodefs.h                    |  55
 ldso/ldso/dl-tls.c                         | 797
 ldso/ldso/ldso.c                           |  41
 ldso/ldso/mips/elfinterp.c                 |   2
 libpthread/nptl/sysdeps/generic/libc-tls.c |   2
8 files changed, 875 insertions, 93 deletions
diff --git a/include/link.h b/include/link.h
index be0b3b83b..5b0a13e60 100644
--- a/include/link.h
+++ b/include/link.h
@@ -25,8 +25,9 @@
#include <elf.h>
#include <dlfcn.h>
#include <sys/types.h>
+/* Defines USE_TLS */
#if defined(IS_IN_libpthread) || defined(IS_IN_rtld)
-#include <tls.h> /* Defines USE_TLS. */
+#include <tls.h>
#endif
/* We use this macro to refer to ELF types independent of the native wordsize.
@@ -114,12 +115,8 @@ struct link_map
ptrdiff_t l_tls_offset;
/* Index of the module in the dtv array. */
size_t l_tls_modid;
- enum /* Where this object came from. */
- {
- lt_executable, /* The main executable program. */
- lt_library, /* Library needed by main executable. */
- lt_loaded /* Extra run-time loaded shared object. */
- } l_type:2;
+ /* Nonzero if _dl_init_static_tls should be called for this module */
+ unsigned int l_need_tls_init:1;
#endif
};
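
The surviving fields are the whole interface the new TLS code needs: l_tls_modid indexes the per-thread dtv, l_tls_offset holds a static-TLS offset (or the NO_TLS_OFFSET sentinel), and l_need_tls_init replaces the old l_type enum to flag modules whose static block still has to be copied into running threads. A rough sketch of how the first two are consulted on a dynamic TLS access, assuming the THREAD_DTV and TLS_DTV_UNALLOCATED macros used in the dl-tls.c changes below:

    dtv_t *dtv = THREAD_DTV ();                        /* per-thread dtv       */
    void *block = dtv[map->l_tls_modid].pointer.val;   /* indexed by module ID */
    if (block == TLS_DTV_UNALLOCATED)
      /* dlopen'ed module whose block is allocated lazily; this is what
         allocate_and_init () in dl-tls.c below takes care of.  */
      block = allocate_and_init (map);
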
diff --git a/ldso/include/dl-hash.h b/ldso/include/dl-hash.h
index cdaea10f0..a0e0130f9 100644
--- a/ldso/include/dl-hash.h
+++ b/ldso/include/dl-hash.h
@@ -10,7 +10,7 @@ struct init_fini {
unsigned long nlist; /* Number of entries in init_fini */
};
-struct dyn_elf{
+struct dyn_elf {
struct elf_resolve * dyn;
struct dyn_elf * next_handle; /* Used by dlopen et al. */
struct init_fini init_fini;
@@ -18,15 +18,40 @@ struct dyn_elf{
struct dyn_elf * prev;
};
-struct elf_resolve{
- /* These entries must be in this order to be compatible with the interface used
- by gdb to obtain the list of symbols. */
+struct elf_resolve {
+ /* These entries must be in this order to be compatible with the interface
+ * used by gdb to obtain the list of symbols. */
ElfW(Addr) loadaddr; /* Base address shared object is loaded at. */
char *libname; /* Absolute file name object was found in. */
ElfW(Dyn) *dynamic_addr; /* Dynamic section of the shared object. */
struct elf_resolve * next;
struct elf_resolve * prev;
/* Nothing after this address is used by gdb. */
+
+#if USE_TLS
+ /* Thread-local storage related info. */
+
+ /* Start of the initialization image. */
+ void *l_tls_initimage;
+ /* Size of the initialization image. */
+ size_t l_tls_initimage_size;
+ /* Size of the TLS block. */
+ size_t l_tls_blocksize;
+ /* Alignment requirement of the TLS block. */
+ size_t l_tls_align;
+ /* Offset of first byte module alignment. */
+ size_t l_tls_firstbyte_offset;
+# ifndef NO_TLS_OFFSET
+# define NO_TLS_OFFSET 0
+# endif
+ /* For objects present at startup time: offset in the static TLS block. */
+ ptrdiff_t l_tls_offset;
+ /* Index of the module in the dtv array. */
+ size_t l_tls_modid;
+ /* Nonzero if _dl_init_static_tls should be called for this module */
+ unsigned int l_need_tls_init:1;
+#endif
+
enum {elf_lib, elf_executable,program_interpreter, loaded_file} libtype;
struct dyn_elf * symbol_scope;
unsigned short usage_count;
@@ -52,29 +77,6 @@ struct elf_resolve{
dev_t st_dev; /* device */
ino_t st_ino; /* inode */
-#if USE_TLS
- /* Thread-local storage related info. */
-
- /* Start of the initialization image. */
- void *l_tls_initimage;
- /* Size of the initialization image. */
- size_t l_tls_initimage_size;
- /* Size of the TLS block. */
- size_t l_tls_blocksize;
- /* Alignment requirement of the TLS block. */
- size_t l_tls_align;
- /* Offset of first byte module alignment. */
- size_t l_tls_firstbyte_offset;
-# ifndef NO_TLS_OFFSET
-# define NO_TLS_OFFSET 0
-# endif
- /* For objects present at startup time: offset in the static TLS block. */
- ptrdiff_t l_tls_offset;
- /* Index of the module in the dtv array. */
- size_t l_tls_modid;
- /* Nonzero if _dl_init_static_tls should be called for this module */
- unsigned int l_need_tls_init:1;
-#endif
#ifdef __powerpc__
/* this is used to store the address of relocation data words, so
* we don't have to calculate it every time, which requires a divide */
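
Moving the TLS members directly behind the gdb-visible prefix keeps their byte offsets identical to the corresponding fields of struct link_map (see include/link.h above), so a struct elf_resolve * can be handed to the glibc-derived TLS code; the mips/elfinterp.c hunk below relies on exactly that cast. A stripped-down illustration of the idea, with hypothetical struct names:

    /* Illustration only: code that touches just the shared leading fields can
       view the larger structure through the smaller one, provided the field
       order -- and therefore the offsets -- match exactly.  */
    struct tls_view    { void *loadaddr; char *libname; long l_tls_offset; };
    struct full_object { void *loadaddr; char *libname; long l_tls_offset;
                         int loader_private_state; };

    static long tls_offset_of (struct tls_view *v) { return v->l_tls_offset; }
    /* usage: struct full_object o;  tls_offset_of ((struct tls_view *) &o);  */
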
diff --git a/ldso/include/ldso.h b/ldso/include/ldso.h
index 95635c3d5..8f95d929b 100644
--- a/ldso/include/ldso.h
+++ b/ldso/include/ldso.h
@@ -30,11 +30,11 @@
#include <bits/uClibc_page.h>
/* Now the ldso specific headers */
#include <dl-elf.h>
-#include <dl-hash.h>
-/* Defines USE_TLS */
#ifdef __UCLIBC_HAS_THREADS_NATIVE__
+/* Defines USE_TLS */
#include <tls.h>
#endif
+#include <dl-hash.h>
/* For INIT/FINI dependency sorting. */
struct init_fini_list {
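
The reordering is not cosmetic: dl-hash.h now carries #if USE_TLS member blocks, and USE_TLS is only defined once <tls.h> has been seen, so with the old order struct elf_resolve would silently lose its TLS fields. In short:

    #ifdef __UCLIBC_HAS_THREADS_NATIVE__
    # include <tls.h>       /* defines USE_TLS */
    #endif
    #include <dl-hash.h>    /* contains "#if USE_TLS ... #endif" members */
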
diff --git a/ldso/include/ldsodefs.h b/ldso/include/ldsodefs.h
index ce4c375da..4b8b0e3d3 100644
--- a/ldso/include/ldsodefs.h
+++ b/ldso/include/ldsodefs.h
@@ -62,38 +62,32 @@ rtld_hidden_proto (_dl_allocate_tls_init)
extern void _dl_deallocate_tls (void *tcb, bool dealloc_tcb) internal_function;
rtld_hidden_proto (_dl_deallocate_tls)
-#if defined USE_TLS
extern void _dl_nothread_init_static_tls (struct link_map *) attribute_hidden;
-#endif
- /* Keep the conditional TLS members at the end so the layout of the
- structure used by !USE_TLS code matches the prefix of the layout in
- the USE_TLS rtld. Note that `struct link_map' is conditionally
- defined as well, so _dl_rtld_map needs to be last before this. */
-#ifdef USE_TLS
- /* Highest dtv index currently needed. */
- EXTERN size_t _dl_tls_max_dtv_idx;
- /* Flag signalling whether there are gaps in the module ID allocation. */
- EXTERN bool _dl_tls_dtv_gaps;
- /* Information about the dtv slots. */
- EXTERN struct dtv_slotinfo_list
+
+/* Highest dtv index currently needed. */
+EXTERN size_t _dl_tls_max_dtv_idx;
+/* Flag signalling whether there are gaps in the module ID allocation. */
+EXTERN bool _dl_tls_dtv_gaps;
+/* Information about the dtv slots. */
+EXTERN struct dtv_slotinfo_list
+{
+ size_t len;
+ struct dtv_slotinfo_list *next;
+ struct dtv_slotinfo
{
- size_t len;
- struct dtv_slotinfo_list *next;
- struct dtv_slotinfo
- {
- size_t gen;
- bool is_static;
- struct link_map *map;
- } slotinfo[0];
- } *_dl_tls_dtv_slotinfo_list;
- /* Number of modules in the static TLS block. */
- EXTERN size_t _dl_tls_static_nelem;
- /* Size of the static TLS block. */
- EXTERN size_t _dl_tls_static_size;
- /* Size actually allocated in the static TLS block. */
- EXTERN size_t _dl_tls_static_used;
- /* Alignment requirement of the static TLS block. */
- EXTERN size_t _dl_tls_static_align;
+ size_t gen;
+ bool is_static;
+ struct link_map *map;
+ } slotinfo[0];
+} *_dl_tls_dtv_slotinfo_list;
+/* Number of modules in the static TLS block. */
+EXTERN size_t _dl_tls_static_nelem;
+/* Size of the static TLS block. */
+EXTERN size_t _dl_tls_static_size;
+/* Size actually allocated in the static TLS block. */
+EXTERN size_t _dl_tls_static_used;
+/* Alignment requirement of the static TLS block. */
+EXTERN size_t _dl_tls_static_align;
/* Number of additional entries in the slotinfo array of each slotinfo
list element. A large number makes it almost certain that we never
@@ -109,6 +103,5 @@ extern void _dl_nothread_init_static_tls (struct link_map *) attribute_hidden;
EXTERN size_t _dl_tls_generation;
EXTERN void (*_dl_init_static_tls) (struct link_map *);
-#endif
#endif
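
Module IDs are global, but the slotinfo entries live in a chain of fixed-size arrays, so mapping an ID to its entry means walking the chain and subtracting each element's len. A sketch of that lookup, assuming the declarations above (the same walk that _dl_update_slotinfo() and _dl_add_to_slotinfo() perform in the dl-tls.c changes below):

    static struct link_map *
    slotinfo_lookup (size_t modid)
    {
      struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
      size_t idx = modid;

      while (idx >= listp->len)
        {
          idx -= listp->len;    /* skip past this array chunk         */
          listp = listp->next;  /* the list only grows, never shrinks */
        }
      return listp->slotinfo[idx].map;   /* NULL means the slot is unused */
    }
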
diff --git a/ldso/ldso/dl-tls.c b/ldso/ldso/dl-tls.c
index aec763620..666a9319c 100644
--- a/ldso/ldso/dl-tls.c
+++ b/ldso/ldso/dl-tls.c
@@ -28,8 +28,21 @@
#include <tls.h>
#include <dl-tls.h>
+#include <ldsodefs.h>
-/* Taken from glibc/sysdeps/generic/dl-tls.c */
+#define calloc(a, b) NULL
+#define malloc(a) NULL
+#define realloc(a, b) NULL
+#define free(a)
+#define _dl_memalign(a, b) NULL
+
+/* The __tls_get_addr function has two basic forms which differ in the
+ arguments. The IA-64 form takes two parameters, the module ID and
+ offset. The form used, among others, on IA-32 takes a reference to
+ a special structure which contains the same information. The second
+ form seems to be more often used (at the moment) so we default to
+ it. Users of the IA-64 form have to provide adequate definitions
+ of the following macros. */
#ifndef GET_ADDR_ARGS
# define GET_ADDR_ARGS tls_index *ti
#endif
@@ -40,6 +53,16 @@
# define GET_ADDR_OFFSET ti->ti_offset
#endif
+/*
+ * Amount of excess space to allocate in the static TLS area
+ * to allow dynamic loading of modules defining IE-model TLS data.
+ */
+#define TLS_STATIC_SURPLUS 64 + DL_NNS * 100
+
+/* Value used for dtv entries for which the allocation is delayed. */
+#define TLS_DTV_UNALLOCATED ((void *) -1l)
+
+
/* Taken from glibc/elf/dl-reloc.c */
#define CHECK_STATIC_TLS(sym_map) \
do { \
@@ -47,50 +70,774 @@
_dl_allocate_static_tls (sym_map); \
} while (0)
-/* Taken from glibc/elf/dl-reloc.c */
void
internal_function __attribute_noinline__
-_dl_allocate_static_tls (struct elf_resolve *map)
+_dl_allocate_static_tls (struct link_map *map)
{
_dl_dprintf(2, "_dl_allocate_static_tls NOT IMPLEMENTED!\n");
_dl_exit(1);
return;
}
+/* Initialize static TLS area and DTV for current (only) thread.
+ libpthread implementations should provide their own hook
+ to handle all threads. */
+void
+internal_function __attribute_noinline__
+_dl_nothread_init_static_tls (struct link_map *map)
+{
+# if TLS_TCB_AT_TP
+ void *dest = (char *) THREAD_SELF - map->l_tls_offset;
+# elif TLS_DTV_AT_TP
+ void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
+# else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+# endif
+
+ /* Fill in the DTV slot so that a later LD/GD access will find it. */
+ dtv_t *dtv = THREAD_DTV ();
+ if (!(map->l_tls_modid <= dtv[-1].counter)) {
+ _dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
+ _dl_exit(1);
+ }
+ dtv[map->l_tls_modid].pointer.val = dest;
+ dtv[map->l_tls_modid].pointer.is_static = true;
+
+ /* Initialize the memory. */
+ _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
+ _dl_memset((dest + map->l_tls_initimage_size), '\0',
+ map->l_tls_blocksize - map->l_tls_initimage_size);
+}
+
/* Taken from glibc/sysdeps/generic/dl-tls.c */
+static void
+__attribute__ ((__noreturn__))
+oom (void)
+{
+ do {
+ _dl_dprintf (_dl_debug_file,
+ "cannot allocate thread-local memory: ABORT\n");
+ _dl_exit (127);
+ } while (1);
+}
+
+size_t
+internal_function
+_dl_next_tls_modid (void)
+{
+ size_t result;
+
+ if (__builtin_expect (_dl_tls_dtv_gaps, false))
+ {
+ size_t disp = 0;
+ struct dtv_slotinfo_list *runp = _dl_tls_dtv_slotinfo_list;
+
+ /* Note that this branch will never be executed during program
+ start since there are no gaps at that time. Therefore it
+ does not matter that the dl_tls_dtv_slotinfo is not allocated
+ yet when the function is called for the first time.
+
+ NB: the offset +1 is due to the fact that DTV[0] is used
+ for something else. */
+ result = _dl_tls_static_nelem + 1;
+ if (result <= _dl_tls_max_dtv_idx)
+ do
+ {
+ while (result - disp < runp->len)
+ {
+ if (runp->slotinfo[result - disp].map == NULL)
+ break;
+
+ ++result;
+ assert (result <= _dl_tls_max_dtv_idx + 1);
+ }
+
+ if (result - disp < runp->len)
+ break;
+
+ disp += runp->len;
+ }
+ while ((runp = runp->next) != NULL);
+
+ if (result > _dl_tls_max_dtv_idx)
+ {
+ /* The new index must indeed be exactly one higher than the
+ previous high. */
+ assert (result == _dl_tls_max_dtv_idx + 1);
+ /* There is no gap anymore. */
+ _dl_tls_dtv_gaps = false;
+
+ goto nogaps;
+ }
+ }
+ else
+ {
+ /* No gaps, allocate a new entry. */
+ nogaps:
+
+ result = ++_dl_tls_max_dtv_idx;
+ }
+
+ return result;
+}
+
+void
+internal_function
+_dl_determine_tlsoffset (void)
+{
+ size_t max_align = TLS_TCB_ALIGN;
+ size_t freetop = 0;
+ size_t freebottom = 0;
+
+ /* The first element of the dtv slot info list is allocated. */
+ assert (_dl_tls_dtv_slotinfo_list != NULL);
+ /* There is at this point only one element in the
+ dl_tls_dtv_slotinfo_list list. */
+ assert (_dl_tls_dtv_slotinfo_list->next == NULL);
+
+ struct dtv_slotinfo *slotinfo = _dl_tls_dtv_slotinfo_list->slotinfo;
+
+ /* Determining the offset of the various parts of the static TLS
+ block has several dependencies. In addition we have to work
+ around bugs in some toolchains.
+
+ Each TLS block from the objects available at link time has a size
+ and an alignment requirement. The GNU ld computes the alignment
+ requirements for the data at the positions *in the file*, though.
+ I.e., it is not simply possible to allocate a block with the size
+ of the TLS program header entry. The data is laid out assuming
+ that the first byte of the TLS block fulfills
+
+ p_vaddr mod p_align == &TLS_BLOCK mod p_align
+
+ This means we have to add artificial padding at the beginning of
+ the TLS block. These bytes are never used for the TLS data in
+ this module but the first byte allocated must be aligned
+ according to mod p_align == 0 so that the first byte of the TLS
+ block is aligned according to p_vaddr mod p_align. This is ugly
+ and the linker can help by computing the offsets in the TLS block
+ assuming the first byte of the TLS block is aligned according to
+ p_align.
+
+ The extra space which might be allocated before the first byte of
+ the TLS block need not go unused. The code below tries to use
+ that memory for the next TLS block. This can work if the total
+ memory requirement for the next TLS block is smaller than the
+ gap. */
+
+# if TLS_TCB_AT_TP
+ /* We simply start with zero. */
+ size_t offset = 0;
+
+ for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
+ {
+ assert (cnt < _dl_tls_dtv_slotinfo_list->len);
+
+ size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
+ & (slotinfo[cnt].map->l_tls_align - 1));
+ size_t off;
+ max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
+
+ if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
+ {
+ off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
+ - firstbyte, slotinfo[cnt].map->l_tls_align)
+ + firstbyte;
+ if (off <= freebottom)
+ {
+ freetop = off;
+
+ /* XXX For some architectures we perhaps should store the
+ negative offset. */
+ slotinfo[cnt].map->l_tls_offset = off;
+ continue;
+ }
+ }
+
+ off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
+ slotinfo[cnt].map->l_tls_align) + firstbyte;
+ if (off > offset + slotinfo[cnt].map->l_tls_blocksize
+ + (freebottom - freetop))
+ {
+ freetop = offset;
+ freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
+ }
+ offset = off;
+
+ /* XXX For some architectures we perhaps should store the
+ negative offset. */
+ slotinfo[cnt].map->l_tls_offset = off;
+ }
+
+ _dl_tls_static_used = offset;
+ _dl_tls_static_size = (roundup (offset + TLS_STATIC_SURPLUS, max_align)
+ + TLS_TCB_SIZE);
+# elif TLS_DTV_AT_TP
+ /* The TLS blocks start right after the TCB. */
+ size_t offset = TLS_TCB_SIZE;
+ size_t cnt;
+
+ for (cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
+ {
+ assert (cnt < _dl_tls_dtv_slotinfo_list->len);
+
+ size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
+ & (slotinfo[cnt].map->l_tls_align - 1));
+ size_t off;
+ max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
+
+ if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
+ {
+ off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
+ if (off - freebottom < firstbyte)
+ off += slotinfo[cnt].map->l_tls_align;
+ if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
+ {
+ slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
+ - firstbyte);
+ continue;
+ }
+ }
+
+ off = roundup (offset, slotinfo[cnt].map->l_tls_align);
+ if (off - offset < firstbyte)
+ off += slotinfo[cnt].map->l_tls_align;
+
+ slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ if (off - firstbyte - offset > freetop - freebottom)
+ {
+ freebottom = offset;
+ freetop = off - firstbyte;
+ }
+
+ offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
+ }
+
+ _dl_tls_static_used = offset;
+ _dl_tls_static_size = roundup (offset + TLS_STATIC_SURPLUS,
+ TLS_TCB_ALIGN);
+# else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+# endif
+
+ /* The alignment requirement for the static TLS block. */
+ _dl_tls_static_align = max_align;
+}
+
+/* This is called only when the data structure setup was skipped at startup,
+ when there was no need for it then. Now we have dynamically loaded
+ something needing TLS, or libpthread needs it. */
+int
+internal_function
+_dl_tls_setup (void)
+{
+ assert (_dl_tls_dtv_slotinfo_list == NULL);
+ assert (_dl_tls_max_dtv_idx == 0);
+
+ const size_t nelem = 2 + TLS_SLOTINFO_SURPLUS;
+
+ _dl_tls_dtv_slotinfo_list
+ = calloc (1, (sizeof (struct dtv_slotinfo_list)
+ + nelem * sizeof (struct dtv_slotinfo)));
+ if (_dl_tls_dtv_slotinfo_list == NULL)
+ return -1;
+
+ _dl_tls_dtv_slotinfo_list->len = nelem;
+
+ /* Number of elements in the static TLS block. It can't be zero
+ because of various assumptions. The one element is null. */
+ _dl_tls_static_nelem = _dl_tls_max_dtv_idx = 1;
+
+ /* This initializes more variables for us. */
+ _dl_determine_tlsoffset ();
+
+ return 0;
+}
+rtld_hidden_def (_dl_tls_setup)
+
+static void *
+internal_function
+allocate_dtv (void *result)
+{
+ dtv_t *dtv;
+ size_t dtv_length;
+
+ /* We allocate a few more elements in the dtv than are needed for the
+ initial set of modules. This should in most cases avoid expansions
+ of the dtv. */
+ dtv_length = _dl_tls_max_dtv_idx + DTV_SURPLUS;
+ dtv = calloc (dtv_length + 2, sizeof (dtv_t));
+ if (dtv != NULL)
+ {
+ /* This is the initial length of the dtv. */
+ dtv[0].counter = dtv_length;
+
+ /* The rest of the dtv (including the generation counter) is
+ initialized with zero to indicate nothing there. */
+
+ /* Add the dtv to the thread data structures. */
+ INSTALL_DTV (result, dtv);
+ }
+ else
+ result = NULL;
+
+ return result;
+}
+
+/* Get size and alignment requirements of the static TLS block. */
+void
+internal_function
+_dl_get_tls_static_info (size_t *sizep, size_t *alignp)
+{
+ *sizep = _dl_tls_static_size;
+ *alignp = _dl_tls_static_align;
+}
+
void *
-__tls_get_addr (GET_ADDR_ARGS)
+internal_function
+_dl_allocate_tls_storage (void)
{
- dtv_t *dtv = THREAD_DTV ();
- struct link_map *the_map = NULL;
- void *p;
+ void *result;
+ size_t size = _dl_tls_static_size;
+
+# if TLS_DTV_AT_TP
+ /* Memory layout is:
+ [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
+ ^ This should be returned. */
+ size += (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
+ & ~(_dl_tls_static_align - 1);
+# endif
+
+ /* Allocate a correctly aligned chunk of memory. */
+ result = _dl_memalign (_dl_tls_static_align, size);
+ if (__builtin_expect (result != NULL, 1))
+ {
+ /* Allocate the DTV. */
+ void *allocated = result;
- if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0))
- the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
+# if TLS_TCB_AT_TP
+ /* The TCB follows the TLS blocks. */
+ result = (char *) result + size - TLS_TCB_SIZE;
- p = dtv[GET_ADDR_MODULE].pointer.val;
+ /* Clear the TCB data structure. We can't ask the caller (i.e.
+ libpthread) to do it, because we will initialize the DTV et al. */
+ _dl_memset (result, '\0', TLS_TCB_SIZE);
+# elif TLS_DTV_AT_TP
+ result = (char *) result + size - _dl_tls_static_size;
- if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
+ We can't ask the caller (i.e. libpthread) to do it, because we will
+ initialize the DTV et al. */
+ _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
+ TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
+# endif
+
+ result = allocate_dtv (result);
+ if (result == NULL)
+ free (allocated);
+ }
+
+ return result;
+}
+
+void *
+internal_function
+_dl_allocate_tls_init (void *result)
+{
+ if (result == NULL)
+ /* The memory allocation failed. */
+ return NULL;
+
+ dtv_t *dtv = GET_DTV (result);
+ struct dtv_slotinfo_list *listp;
+ size_t total = 0;
+ size_t maxgen = 0;
+
+ /* We have to prepare the dtv for all currently loaded modules using
+ TLS. For those which are dynamically loaded we add the values
+ indicating deferred allocation. */
+ listp = _dl_tls_dtv_slotinfo_list;
+ while (1)
+ {
+ size_t cnt;
+
+ for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
+ {
+ struct link_map *map;
+ void *dest;
+
+ /* Check for the total number of used slots. */
+ if (total + cnt > _dl_tls_max_dtv_idx)
+ break;
+
+ map = listp->slotinfo[cnt].map;
+ if (map == NULL)
+ /* Unused entry. */
+ continue;
+
+ /* Keep track of the maximum generation number. This might
+ not be the generation counter. */
+ maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);
+
+ if (map->l_tls_offset == NO_TLS_OFFSET)
+ {
+ /* For dynamically loaded modules we simply store
+ the value indicating deferred allocation. */
+ dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
+ dtv[map->l_tls_modid].pointer.is_static = false;
+ continue;
+ }
+
+ assert (map->l_tls_modid == cnt);
+ assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
+# if TLS_TCB_AT_TP
+ assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_tls_offset;
+# elif TLS_DTV_AT_TP
+ dest = (char *) result + map->l_tls_offset;
+# else
+# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
+# endif
+
+ /* Copy the initialization image and clear the BSS part. */
+ dtv[map->l_tls_modid].pointer.val = dest;
+ dtv[map->l_tls_modid].pointer.is_static = true;
+ _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
+ _dl_memset((dest + map->l_tls_initimage_size), '\0',
+ map->l_tls_blocksize - map->l_tls_initimage_size);
+
+ }
+
+ total += cnt;
+ if (total >= _dl_tls_max_dtv_idx)
+ break;
+
+ listp = listp->next;
+ assert (listp != NULL);
+ }
+
+ /* The DTV version is up-to-date now. */
+ dtv[0].counter = maxgen;
+
+ return result;
+}
+rtld_hidden_def (_dl_allocate_tls_init)
+
+void *
+internal_function
+_dl_allocate_tls (void *mem)
+{
+ return _dl_allocate_tls_init (mem == NULL
+ ? _dl_allocate_tls_storage ()
+ : allocate_dtv (mem));
+}
+rtld_hidden_def (_dl_allocate_tls)
+
+void
+internal_function
+_dl_deallocate_tls (void *tcb, bool dealloc_tcb)
+{
+ dtv_t *dtv = GET_DTV (tcb);
+ size_t cnt;
+
+ /* We need to free the memory allocated for non-static TLS. */
+ for (cnt = 0; cnt < dtv[-1].counter; ++cnt)
+ if (! dtv[1 + cnt].pointer.is_static
+ && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
+ free (dtv[1 + cnt].pointer.val);
+
+ /* The array starts with dtv[-1]. */
+ if (dtv != _dl_initial_dtv)
+ free (dtv - 1);
+
+ if (dealloc_tcb)
+ {
+# if TLS_TCB_AT_TP
+ /* The TCB follows the TLS blocks. Back up to free the whole block. */
+ tcb -= _dl_tls_static_size - TLS_TCB_SIZE;
+# elif TLS_DTV_AT_TP
+ /* Back up the TLS_PRE_TCB_SIZE bytes. */
+ tcb -= (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
+ & ~(_dl_tls_static_align - 1);
+# endif
+ free (tcb);
+ }
+}
+rtld_hidden_def (_dl_deallocate_tls)
+
+static void *
+allocate_and_init (struct link_map *map)
+{
+ void *newp;
+
+ newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
+ if (newp == NULL)
{
- /* The allocation was deferred. Do it now. */
- if (the_map == NULL)
+ _dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
+ _dl_exit(1);
+ }
+
+ /* Initialize the memory. */
+ _dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size);
+ _dl_memset ((newp + map->l_tls_initimage_size), '\0',
+ map->l_tls_blocksize - map->l_tls_initimage_size);
+
+ return newp;
+}
+
+struct link_map *
+_dl_update_slotinfo (unsigned long int req_modid)
+{
+ struct link_map *the_map = NULL;
+ dtv_t *dtv = THREAD_DTV ();
+
+ /* The global dl_tls_dtv_slotinfo array contains for each module
+ index the generation counter current when the entry was created.
+ This array never shrinks so that all module indices which were
+ valid at some time can be used to access it. Before the first
+ use of a new module index in this function the array was extended
+ appropriately. Access also does not have to be guarded against
+ modifications of the array. It is assumed that pointer-size
+ values can be read atomically even in SMP environments. It is
+ possible that other threads at the same time dynamically load
+ code and therefore add to the slotinfo list. This is a problem
+ since we must not pick up any information about incomplete work.
+ The solution to this is to ignore all dtv slots which were
+ created after the one we are currently interested in. We know that
+ dynamic loading for this module is completed and this is the last
+ load operation we know finished. */
+ unsigned long int idx = req_modid;
+ struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
+
+ while (idx >= listp->len)
+ {
+ idx -= listp->len;
+ listp = listp->next;
+ }
+
+ if (dtv[0].counter < listp->slotinfo[idx].gen)
+ {
+ /* The generation counter for the slot is higher than what the
+ current dtv implements. We have to update the whole dtv but
+ only those entries with a generation counter <= the one for
+ the entry we need. */
+ size_t new_gen = listp->slotinfo[idx].gen;
+ size_t total = 0;
+
+ /* We have to look through the entire dtv slotinfo list. */
+ listp = _dl_tls_dtv_slotinfo_list;
+ do
+ {
+ size_t cnt;
+
+ for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
+ {
+ size_t gen = listp->slotinfo[cnt].gen;
+
+ if (gen > new_gen)
+ /* This is a slot for a generation younger than the
+ one we are handling now. It might be incompletely
+ set up so ignore it. */
+ continue;
+
+ /* If the entry is older than the current dtv layout we
+ know we don't have to handle it. */
+ if (gen <= dtv[0].counter)
+ continue;
+
+ /* If there is no map this means the entry is empty. */
+ struct link_map *map = listp->slotinfo[cnt].map;
+ if (map == NULL)
+ {
+ /* If this modid was used at some point the memory
+ might still be allocated. */
+ if (! dtv[total + cnt].pointer.is_static
+ && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
+ {
+ free (dtv[total + cnt].pointer.val);
+ dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
+ }
+
+ continue;
+ }
+
+ /* Check whether the current dtv array is large enough. */
+ size_t modid = map->l_tls_modid;
+ assert (total + cnt == modid);
+ if (dtv[-1].counter < modid)
{
- /* Find the link map for this module. */
- size_t idx = GET_ADDR_MODULE;
- struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
+ /* Reallocate the dtv. */
+ dtv_t *newp;
+ size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS;
+ size_t oldsize = dtv[-1].counter;
+
+ assert (map->l_tls_modid <= newsize);
+
+ if (dtv == _dl_initial_dtv)
+ {
+ /* This is the initial dtv that was allocated
+ during rtld startup using the dl-minimal.c
+ malloc instead of the real malloc. We can't
+ free it, we have to abandon the old storage. */
+
+ newp = malloc ((2 + newsize) * sizeof (dtv_t));
+ if (newp == NULL)
+ oom ();
+ _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
+ }
+ else
+ {
+ newp = realloc (&dtv[-1],
+ (2 + newsize) * sizeof (dtv_t));
+ if (newp == NULL)
+ oom ();
+ }
- while (idx >= listp->len)
- {
- idx -= listp->len;
- listp = listp->next;
- }
+ newp[0].counter = newsize;
- the_map = listp->slotinfo[idx].map;
+ /* Clear the newly allocated part. */
+ _dl_memset (newp + 2 + oldsize, '\0',
+ (newsize - oldsize) * sizeof (dtv_t));
+
+ /* Point dtv to the generation counter. */
+ dtv = &newp[1];
+
+ /* Install this new dtv in the thread data
+ structures. */
+ INSTALL_NEW_DTV (dtv);
}
- p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
- dtv[GET_ADDR_MODULE].pointer.is_static = false;
+ /* If there is currently memory allocated for this
+ dtv entry, free it. */
+ /* XXX Ideally we will at some point create a memory
+ pool. */
+ if (! dtv[modid].pointer.is_static
+ && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
+ /* Note that free is called for NULL as well. We
+ deallocate even if it is this dtv entry we are
+ supposed to load. The reason is that we call
+ memalign and not malloc. */
+ free (dtv[modid].pointer.val);
+
+ /* This module is loaded dynamically. We defer memory
+ allocation. */
+ dtv[modid].pointer.is_static = false;
+ dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
+
+ if (modid == req_modid)
+ the_map = map;
+ }
+
+ total += listp->len;
}
+ while ((listp = listp->next) != NULL);
+
+ /* This will be the new maximum generation counter. */
+ dtv[0].counter = new_gen;
+ }
+
+ return the_map;
+}
+
+
+/* The generic dynamic and local dynamic model cannot be used in
+ statically linked applications. */
+void *
+__tls_get_addr (GET_ADDR_ARGS)
+{
+ dtv_t *dtv = THREAD_DTV ();
+ struct link_map *the_map = NULL;
+ void *p;
+
+ if (__builtin_expect (dtv[0].counter != _dl_tls_generation, 0))
+ the_map = _dl_update_slotinfo (GET_ADDR_MODULE);
+
+ p = dtv[GET_ADDR_MODULE].pointer.val;
+
+ if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ {
+ /* The allocation was deferred. Do it now. */
+ if (the_map == NULL)
+ {
+ /* Find the link map for this module. */
+ size_t idx = GET_ADDR_MODULE;
+ struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;
+
+ while (idx >= listp->len)
+ {
+ idx -= listp->len;
+ listp = listp->next;
+ }
+
+ the_map = listp->slotinfo[idx].map;
+ }
+
+ p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
+ dtv[GET_ADDR_MODULE].pointer.is_static = false;
+ }
+
+ return (char *) p + GET_ADDR_OFFSET;
+}
+
+void
+_dl_add_to_slotinfo (struct link_map *l)
+{
+ /* Now that we know the object is loaded successfully, add
+ modules containing TLS data to the dtv info table. We
+ might have to increase its size. */
+ struct dtv_slotinfo_list *listp;
+ struct dtv_slotinfo_list *prevp;
+ size_t idx = l->l_tls_modid;
+
+ /* Find the place in the dtv slotinfo list. */
+ listp = _dl_tls_dtv_slotinfo_list;
+ prevp = NULL; /* Needed to shut up gcc. */
+ do
+ {
+ /* Does it fit in the array of this list element? */
+ if (idx < listp->len)
+ break;
+ idx -= listp->len;
+ prevp = listp;
+ listp = listp->next;
+ }
+ while (listp != NULL);
+
+ if (listp == NULL)
+ {
+ /* When we come here it means we have to add a new element
+ to the slotinfo list. And the new module must be in
+ the first slot. */
+ assert (idx == 0);
+
+ listp = prevp->next = (struct dtv_slotinfo_list *)
+ malloc (sizeof (struct dtv_slotinfo_list)
+ + TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
+ if (listp == NULL)
+ {
+ /* We ran out of memory. We will simply fail this
+ call but don't undo anything we did so far. The
+ application will crash or be terminated anyway very
+ soon. */
+
+ /* We have to do this since some entries in the dtv
+ slotinfo array might already point to this
+ generation. */
+ ++_dl_tls_generation;
+
+ _dl_dprintf (_dl_debug_file,
+ "cannot create TLS data structures: ABORT\n");
+ _dl_exit (127);
+ }
+
+ listp->len = TLS_SLOTINFO_SURPLUS;
+ listp->next = NULL;
+ _dl_memset (listp->slotinfo, '\0',
+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
+ }
- return (char *) p + GET_ADDR_OFFSET;
+ /* Add the information into the slotinfo data structure. */
+ listp->slotinfo[idx].map = l;
+ listp->slotinfo[idx].gen = _dl_tls_generation + 1;
}
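
For reference, the non-IA-64 GET_ADDR_ARGS form used above receives a small descriptor that the compiler emits for general-dynamic and local-dynamic accesses, and __tls_get_addr() returns the variable's address directly (the offset is added in the final return above). A minimal sketch of that contract, assuming the conventional two-word tls_index layout (uClibc's real definition comes from <dl-tls.h>):

    typedef struct
    {
      unsigned long int ti_module;   /* module ID, i.e. l_tls_modid  */
      unsigned long int ti_offset;   /* offset inside that TLS block */
    } tls_index;

    /* Conceptually, an access to a __thread variable becomes:
         tls_index ti = { some_modid, some_offset };
         char *addr = (char *) __tls_get_addr (&ti);   */
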
diff --git a/ldso/ldso/ldso.c b/ldso/ldso/ldso.c
index cbf947966..17022faff 100644
--- a/ldso/ldso/ldso.c
+++ b/ldso/ldso/ldso.c
@@ -229,6 +229,47 @@ void _dl_get_ready_to_run(struct elf_resolve *tpnt, unsigned long load_addr,
}
/*
+ * This adds another loop in the non-NPTL case, but we have to
+ * catch the stupid user who tries to run a binary with TLS data
+ * in it. For NPTL, we fill in the TLS data for the application
+ * like we are supposed to.
+ */
+ {
+ int i;
+ ElfW(Phdr) *ppnt = (ElfW(Phdr) *) auxvt[AT_PHDR].a_un.a_val;
+
+ for (i = 0; i < auxvt[AT_PHNUM].a_un.a_val; i++, ppnt++)
+ if (ppnt->p_type == PT_TLS) {
+#if USE_TLS
+ if (ppnt->p_memsz > 0) {
+ /*
+ * Note that in the case of the dynamic linker we duplicate
+ * work here since we read the PT_TLS entry already in
+ * _dl_start_final. But the result is repeatable so do
+ * not check for this special but unimportant case.
+ */
+ app_tpnt->l_tls_blocksize = ppnt->p_memsz;
+ app_tpnt->l_tls_align = ppnt->p_align;
+ if (ppnt->p_align == 0)
+ app_tpnt->l_tls_firstbyte_offset = 0;
+ else
+ app_tpnt->l_tls_firstbyte_offset =
+ (ppnt->p_vaddr & (ppnt->p_align - 1));
+ app_tpnt->l_tls_initimage_size = ppnt->p_filesz;
+ app_tpnt->l_tls_initimage = (void *) ppnt->p_vaddr;
+
+ /* This image gets the ID one. */
+ _dl_tls_max_dtv_idx = app_tpnt->l_tls_modid = 1;
+ }
+ break;
+#else
+ _dl_dprintf(_dl_debug_file, "Program uses TLS, but ld-uClibc.so does not support it!\n");
+ _dl_exit(1);
+#endif
+ }
+ }
+
+ /*
* This is used by gdb to locate the chain of shared libraries that are
* currently loaded.
*/
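
The l_tls_firstbyte_offset recorded here is just the TLS segment's virtual address modulo its alignment; _dl_determine_tlsoffset() in dl-tls.c uses it to keep the in-memory block congruent with the file layout. A small worked sketch with hypothetical numbers:

    /* Hypothetical PT_TLS entry: p_vaddr = 0x1234, p_align = 16.  */
    size_t align     = 16;
    size_t firstbyte = 0x1234 & (align - 1);   /* = 4 */
    /* The static TLS layout code must then place this block so that
       (block_start % align) == 4, i.e. p_vaddr mod p_align ==
       &TLS_BLOCK mod p_align, padding the static area if necessary.  */
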
diff --git a/ldso/ldso/mips/elfinterp.c b/ldso/ldso/mips/elfinterp.c
index e7e1db439..c9b9be65a 100644
--- a/ldso/ldso/mips/elfinterp.c
+++ b/ldso/ldso/mips/elfinterp.c
@@ -155,7 +155,7 @@ int _dl_parse_relocation_information(struct dyn_elf *xpnt,
case R_MIPS_TLS_TPREL32:
case R_MIPS_TLS_TPREL64:
- CHECK_STATIC_TLS (tpnt);
+ CHECK_STATIC_TLS ((struct link_map *) tpnt);
*(ElfW(Word) *)reloc_addr +=
(tpnt->l_tls_offset +
symtab[symtab_index].st_value -
diff --git a/libpthread/nptl/sysdeps/generic/libc-tls.c b/libpthread/nptl/sysdeps/generic/libc-tls.c
index 361c74c17..cb451b252 100644
--- a/libpthread/nptl/sysdeps/generic/libc-tls.c
+++ b/libpthread/nptl/sysdeps/generic/libc-tls.c
@@ -209,7 +209,9 @@ __libc_setup_tls (size_t tcbsize, size_t tcbalign)
static_map.l_tls_blocksize = memsz;
static_map.l_tls_initimage = initimage;
static_map.l_tls_initimage_size = filesz;
+#ifndef __UCLIBC__
static_map.l_type = lt_executable;
+#endif
static_map.l_tls_modid = 1;
init_slotinfo ();