From fa476d01f1c1990a92ee49d1f1c557b83805d0e9 Mon Sep 17 00:00:00 2001
From: Freeman Wang <xwang@ubicom.com>
Date: Sat, 19 Dec 2009 13:43:00 -0800
Subject: [PATCH 8/9] malloc: fix race condition and other bugs in the no-mmu malloc

Fixes multiple race conditions on the mmb list. This is done by
making mmb_heap_lock a recursive lock and extending the regular
heap_lock to cover the mmb heap handling.

Also move the new_mmb allocation up, to before the mmb list is
iterated to find the insertion point. Previously, when the mmb heap
also ran out and had to be extended just as the regular heap was
being extended, allocating the descriptor after the scan could leave
the mmb list in an inconsistent state.

Signed-off-by: Freeman Wang <xwang@ubicom.com>
Signed-off-by: Austin Foxley <austinf@cetoncorp.com>
---
 libc/stdlib/malloc/free.c   |    6 +++---
 libc/stdlib/malloc/malloc.c |    7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)
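
Note on the locking change: with the default (non-recursive) mutex, a
thread that already holds __malloc_mmb_heap_lock and re-enters the mmb
heap code (as the free path can, per the comment in free.c) would
deadlock on the second pthread_mutex_lock. A recursive mutex simply
nests the extra acquisition. Below is a minimal standalone sketch of
the difference, with hypothetical names -- it is not uClibc code, and
it uses the portable pthread_mutexattr_settype() form rather than the
PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP static shorthand the patch uses.
Build with: cc -pthread demo.c

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock;

    /* Stands in for the re-entered path, e.g. freeing an mmb
       descriptor while the lock is already held.  */
    static void inner(void)
    {
        pthread_mutex_lock(&lock);   /* second acquisition, same thread */
        puts("re-acquired recursively");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_mutexattr_t attr;

        /* Portable spelling of what the recursive static
           initializer in the patch sets up.  */
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&lock, &attr);

        pthread_mutex_lock(&lock);   /* first acquisition */
        inner();                     /* would deadlock with the default type */
        pthread_mutex_unlock(&lock);
        return 0;
    }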

diff --git a/libc/stdlib/malloc/free.c b/libc/stdlib/malloc/free.c
index 90e18f4..741248a 100644
--- a/libc/stdlib/malloc/free.c
+++ b/libc/stdlib/malloc/free.c
@@ -179,14 +179,14 @@ __free_to_heap (void *mem, struct heap_free_area **heap
 	      /* Start searching again from the end of this block.  */
 	      start = mmb_end;
 
+	      /* Release the descriptor block we used.  */
+	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	      /* We have to unlock the heap before we recurse to free the mmb
 		 descriptor, because we might be unmapping from the mmb
 		 heap.  */
               __heap_unlock (heap_lock);
 
-	      /* Release the descriptor block we used.  */
-	      free_to_heap (mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
-
 	      /* Do the actual munmap.  */
 	      munmap ((void *)mmb_start, mmb_end - mmb_start);
 
diff --git a/libc/stdlib/malloc/malloc.c b/libc/stdlib/malloc/malloc.c
index 71f9e58..84a6acd 100644
--- a/libc/stdlib/malloc/malloc.c
+++ b/libc/stdlib/malloc/malloc.c
@@ -48,7 +48,7 @@ struct malloc_mmb *__malloc_mmapped_blocks = 0;
 HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
 struct heap_free_area *__malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
 #ifdef HEAP_USE_LOCKING
-pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t __malloc_mmb_heap_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
 #endif
 #endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
 
@@ -151,19 +151,19 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 	  /* Try again to allocate.  */
 	  mem = __heap_alloc (heap, &size);
 
-	  __heap_unlock (heap_lock);
 
 #if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
 	  /* Insert a record of BLOCK in sorted order into the
 	     __malloc_mmapped_blocks list.  */
 
+	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
+
 	  for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
 	       mmb;
 	       prev_mmb = mmb, mmb = mmb->next)
 	    if (block < mmb->mem)
 	      break;
 
-	  new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap, &__malloc_mmb_heap_lock);
 	  new_mmb->next = mmb;
 	  new_mmb->mem = block;
 	  new_mmb->size = block_size;
@@ -177,6 +177,7 @@ __malloc_from_heap (size_t size, struct heap_free_area **heap
 			    (unsigned)new_mmb,
 			    (unsigned)new_mmb->mem, block_size);
 #endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
+	  __heap_unlock (heap_lock);
 	}
     }
 
-- 
1.6.6.1
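
Note on the reordering in malloc.c: the descriptor for the new block
is itself allocated from the mmb heap, and per the commit message,
growing that heap can end up modifying the very list being scanned.
Allocating first and scanning second means any such side effect is
already visible before the insertion point is chosen. A minimal
standalone sketch of the pattern follows, with hypothetical names and
a plain malloc() stand-in -- it is not uClibc code.

    #include <stdlib.h>

    struct node { struct node *next; void *mem; };

    static struct node *list;

    static struct node *alloc_node(void)
    {
        /* Plain malloc() stands in for malloc_from_heap() on the mmb
           heap, whose possible side effects on `list` motivated the
           reordering.  */
        return malloc(sizeof(struct node));
    }

    static void insert_sorted(void *mem)
    {
        struct node *prev, *cur, *n;

        /* Allocate first, while no pointers into the list are held... */
        n = alloc_node();
        if (!n)
            return;

        /* ...then find the insertion point: anything alloc_node() did
           to the list is already reflected, so prev/cur cannot go
           stale between the scan and the splice below.  */
        for (prev = NULL, cur = list; cur; prev = cur, cur = cur->next)
            if ((char *) mem < (char *) cur->mem)
                break;

        n->mem = mem;
        n->next = cur;
        if (prev)
            prev->next = n;
        else
            list = n;
    }

The patch also moves __heap_unlock (heap_lock) down past this
bookkeeping, so the list is only ever walked or spliced with the
regular heap lock held -- the other half of the race fix described in
the commit message.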