path: root/community/rethinkdb/support-aarch64.patch
--- a/configure
+++ b/configure
@@ -81,7 +81,7 @@
     case "${MACHINE%%-*}" in
         x86_64|i?86)
             true ;;
-        arm*)
+        arm* | aarch64)
             var_append LDFLAGS -ldl
             final_warning="ARM support is still experimental" ;;
         *)
--- a/src/rpc/connectivity/cluster.cc
+++ b/src/rpc/connectivity/cluster.cc
@@ -103,7 +103,7 @@
     return false;
 }

-#if defined (__x86_64__) || defined (_WIN64)
+#if defined (__x86_64__) || defined (_WIN64) || defined (__aarch64__)
 const std::string connectivity_cluster_t::cluster_arch_bitsize("64bit");
 #elif defined (__i386__) || defined(__arm__)
 const std::string connectivity_cluster_t::cluster_arch_bitsize("32bit");
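
The cluster_arch_bitsize string reports the pointer width of the build, and aarch64 Linux is an LP64 target with 8-byte pointers, which is why __aarch64__ joins the 64-bit branch above. Purely as an illustration (the name below is hypothetical; the patch keeps RethinkDB's existing #if chain), the same label could be derived from the pointer width directly:

    #include <string>

    /* Sketch only: pick the bitsize label from the pointer width instead of
       enumerating architecture macros; 8-byte pointers mean a 64-bit build. */
    static_assert(sizeof(void *) == 4 || sizeof(void *) == 8,
                  "unexpected pointer width");
    const std::string cluster_arch_bitsize_sketch =
        sizeof(void *) == 8 ? "64bit" : "32bit";
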
--- a/mk/support/pkg/v8.sh
+++ b/mk/support/pkg/v8.sh
@@ -39,11 +39,13 @@
     fi
     arch_gypflags=
     raspberry_pi_gypflags='-Darm_version=6 -Darm_fpu=vfpv2'
+    aarch64_gypflags='-Darm_version=8 -Darm_fpu=vfpv4'
     host=$($CXX -dumpmachine)
     case ${host%%-*} in
         i?86)   arch=ia32 ;;
         x86_64) arch=x64 ;;
         arm*)   arch=arm; arch_gypflags=$raspberry_pi_gypflags ;;
+        aarch64) arch=arm64; arch_gypflags=$aarch64_gypflags ;;
         *)      arch=native ;;
     esac
     mode=release
--- a/src/arch/runtime/context_switching.cc
+++ b/src/arch/runtime/context_switching.cc
@@ -98,7 +98,7 @@
     the stack by swapcontext; they're callee-saved, so whatever happens to be in
     them will be ignored. */
     sp -= 6;
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
     /* We must preserve r4, r5, r6, r7, r8, r9, r10, and r11. Because we have to store the LR (r14) in swapcontext as well, we also store r12 in swapcontext to keep the stack double-word-aligned. However, we already accounted for both of those by decrementing sp twice above (once for r14 and once for r12, say). */
     sp -= 8;
 #else
@@ -262,7 +262,7 @@
 }

 asm(
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
 // We keep the i386, x86_64, and ARM stuff interleaved in order to enforce commonality.
 #if defined(__x86_64__)
 #if defined(__LP64__) || defined(__LLP64__)
@@ -279,7 +279,7 @@
     /* `current_pointer_out` is in `4(%ebp)`. `dest_pointer` is in `8(%ebp)`. */
 #elif defined(__x86_64__)
     /* `current_pointer_out` is in `%rdi`. `dest_pointer` is in `%rsi`. */
-#elif defined(__arm__)
+#elif defined(__arm__) || defined(__aarch64__)
     /* `current_pointer_out` is in `r0`. `dest_pointer` is in `r1` */
 #endif

@@ -302,6 +302,17 @@
     "push {r12}\n"
     "push {r14}\n"
     "push {r4-r11}\n"
+#elif defined(__aarch64__)
+    "str   x12, [sp, #-8]!\n"
+    "str   x14, [sp, #-8]!\n"
+    "str   x4, [sp, #-8]!\n"
+    "str   x5, [sp, #-8]!\n"
+    "str   x6, [sp, #-8]!\n"
+    "str   x7, [sp, #-8]!\n"
+    "str   x8, [sp, #-8]!\n"
+    "str   x9, [sp, #-8]!\n"
+    "str   x10, [sp, #-8]!\n"
+    "str   x11, [sp, #-8]!\n"
 #endif

     /* Save old stack pointer. */
@@ -316,6 +327,9 @@
 #elif defined(__arm__)
     /* On ARM, the first argument is in `r0`. `r13` is the stack pointer. */
     "str r13, [r0]\n"
+#elif defined(__aarch64__)
+    /* On aarch64, the first argument is in `x0`. `sp` cannot be stored directly, so copy it through a scratch register. */
+    "mov x2, sp\n" "str x2, [x0]\n"
 #endif

     /* Load the new stack pointer and the preserved registers. */
@@ -330,6 +344,9 @@
 #elif defined(__arm__)
     /* On ARM, the second argument is in `r1` */
     "mov r13, r1\n"
+#elif defined(__aarch64__)
+    /* On aarch64, the second argument is in `x1` */
+    "mov sp, x1\n"
 #endif

 #if defined(__i386__)
@@ -348,6 +365,17 @@
     "pop {r4-r11}\n"
     "pop {r14}\n"
     "pop {r12}\n"
+#elif defined(__aarch64__)
+    "ldr   x4, [sp], #8\n"
+    "ldr   x5, [sp], #8\n"
+    "ldr   x6, [sp], #8\n"
+    "ldr   x7, [sp], #8\n"
+    "ldr   x8, [sp], #8\n"
+    "ldr   x9, [sp], #8\n"
+    "ldr   x10, [sp], #8\n"
+    "ldr   x11, [sp], #8\n"
+    "ldr   x14, [sp], #8\n"
+    "ldr   x12, [sp], #8\n"
 #endif

 #if defined(__i386__) || defined(__x86_64__)
@@ -360,6 +388,8 @@
     /* Above, we popped `LR` (`r14`) off the stack, so the bx instruction will
     jump to the correct return address. */
     "bx r14\n"
+#elif defined(__aarch64__)
+    "ret\n"
 #endif

 #else
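
For reference, the AArch64 procedure call standard (AAPCS64) makes x19-x28 the callee-saved general registers, with x29 the frame pointer, x30 the link register that ret branches through, and d8-d15 the callee-saved SIMD halves; x4-x14 are not preserved across calls, and stack-alignment checking (enabled by Linux) requires sp to be 16-byte aligned whenever it is used as a base register. The sketch below shows the conventional stp/ldp save/restore for a swapcontext-style switch using the same argument convention as above (current_pointer_out in x0, dest_pointer in x1). It is an illustration under those assumptions, not a drop-in for the hunks in this patch: the function name is hypothetical, and the stack-slot accounting on the C++ side of context_switching.cc would have to reserve the matching 20 eight-byte slots and put the initial function address in the pair restored into x30.

    #if defined(__aarch64__)
    /* Hypothetical stand-alone sketch; mirrors the patch's swapcontext calling
       convention: store the old stack pointer through the first argument, then
       switch to the stack passed as the second argument. */
    extern "C" void aarch64_swapcontext_sketch(void **current_pointer_out,
                                               void *dest_pointer);

    asm(
        ".text\n"
        ".globl aarch64_swapcontext_sketch\n"
        "aarch64_swapcontext_sketch:\n"

        /* Save the AAPCS64 callee-saved state; stp keeps sp 16-byte aligned. */
        "stp x29, x30, [sp, #-16]!\n"
        "stp x27, x28, [sp, #-16]!\n"
        "stp x25, x26, [sp, #-16]!\n"
        "stp x23, x24, [sp, #-16]!\n"
        "stp x21, x22, [sp, #-16]!\n"
        "stp x19, x20, [sp, #-16]!\n"
        "stp d14, d15, [sp, #-16]!\n"
        "stp d12, d13, [sp, #-16]!\n"
        "stp d10, d11, [sp, #-16]!\n"
        "stp d8,  d9,  [sp, #-16]!\n"

        /* Publish the old stack pointer through `x0`; str cannot use sp as its
           source register, so copy it to a scratch register first. */
        "mov x2, sp\n"
        "str x2, [x0]\n"

        /* Switch to the destination stack (`x1`) and restore in mirrored order. */
        "mov sp, x1\n"
        "ldp d8,  d9,  [sp], #16\n"
        "ldp d10, d11, [sp], #16\n"
        "ldp d12, d13, [sp], #16\n"
        "ldp d14, d15, [sp], #16\n"
        "ldp x19, x20, [sp], #16\n"
        "ldp x21, x22, [sp], #16\n"
        "ldp x23, x24, [sp], #16\n"
        "ldp x25, x26, [sp], #16\n"
        "ldp x27, x28, [sp], #16\n"
        "ldp x29, x30, [sp], #16\n"

        /* Return through the restored link register. */
        "ret\n"
    );
    #endif

Restoring in the mirrored pair order gives every register its own value back, and the 160-byte frame stays a multiple of 16, so sp is aligned at every access.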