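snappy: enable fast unaligned access on s390x and pre-R6 MIPS, and use
GCC byte-swap builtins on musl

Three changes to snappy-stubs-internal.h:

- s390x handles unaligned loads and stores natively, so it joins x86,
  PowerPC, and ARM64 on the plain-pointer-cast fast path.
- Pre-R6 MIPS cannot safely dereference a misaligned pointer, but the
  compiler can emit LWL/LWR and SWL/SWR sequences when told, via a
  packed struct, that the data may be unaligned. The existing ARM
  branch is extended to cover pre-R6 MIPS (note that && binds tighter
  than ||, so the condition reads "32-bit ARM with unaligned support,
  or pre-R6 MIPS"), and a new Unaligned64Struct carries the 64-bit
  case; ARM keeps the memcpy-based 64-bit helpers.
- On remaining __GNUC__ targets, __builtin_bswap{16,32,64} replaces
  <byteswap.h>, whose musl implementation is generic and slow.

Two illustrative sketches follow the diff.
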
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index f834bdb..22407ef 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -123,7 +123,7 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 // x86, PowerPC, and ARM64 can simply do these loads and stores native.
 
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__aarch64__)
+    defined(__aarch64__) || defined(__s390x__)
 
 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
@@ -150,6 +150,8 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 // (it ignores __attribute__((packed)) on individual variables). However,
 // we can tell it that a _struct_ is unaligned, which has the same effect,
 // so we do that.
+//
+// On pre-R6 MIPS just let the compiler use LWL/LWR, SWL/SWR etc.
 #elif defined(__arm__) && \
       !defined(__ARM_ARCH_4__) && \
       !defined(__ARM_ARCH_4T__) && \
@@ -163,7 +165,8 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
       !defined(__ARM_ARCH_6K__) && \
       !defined(__ARM_ARCH_6Z__) && \
       !defined(__ARM_ARCH_6ZK__) && \
-      !defined(__ARM_ARCH_6T2__)
+      !defined(__ARM_ARCH_6T2__) || \
+      (defined(__mips__) && (!defined(__mips_isa_rev) || __mips_isa_rev < 6))
 
 #if __GNUC__
 #define ATTRIBUTE_PACKED __attribute__((__packed__))
@@ -184,6 +187,11 @@ struct Unaligned32Struct {
   uint8 dummy;  // To make the size non-power-of-two.
 } ATTRIBUTE_PACKED;
 
+struct Unaligned64Struct {
+  uint64 value;
+  uint8 dummy;  // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
 }  // namespace internal
 }  // namespace base
 
@@ -203,6 +211,7 @@ struct Unaligned32Struct {
 // See if that would be more efficient on platforms supporting it,
 // at least for copies.
 
+#ifndef __mips__
 inline uint64 UNALIGNED_LOAD64(const void *p) {
   uint64 t;
   memcpy(&t, p, sizeof t);
@@ -212,6 +221,13 @@ inline uint64 UNALIGNED_LOAD64(const void *p) {
 inline void UNALIGNED_STORE64(void *p, uint64 v) {
   memcpy(p, &v, sizeof v);
 }
+#else
+#define UNALIGNED_LOAD64(_p) \
+    ((reinterpret_cast<const ::snappy::base::internal::Unaligned64Struct *>(_p))->value)
+#define UNALIGNED_STORE64(_p, _val) \
+    ((reinterpret_cast< ::snappy::base::internal::Unaligned64Struct *>(_p))->value = \
+         (_val))
+#endif
 
 #else
 
@@ -274,6 +290,13 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 #define bswap_32(x) OSSwapInt32(x)
 #define bswap_64(x) OSSwapInt64(x)
 
+#elif defined(__GNUC__)
+/* musl provides suboptimal (generic) bswap_xx implementations in <byteswap.h> */
+
+#define bswap_16 __builtin_bswap16
+#define bswap_32 __builtin_bswap32
+#define bswap_64 __builtin_bswap64
+
 #elif defined(HAVE_BYTESWAP_H)
 #include <byteswap.h>
 
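
Notes (illustrative, not part of the patch):

The core trick in the MIPS hunks is that GCC honors __attribute__((packed))
on a struct, though not on individual variables, so accessing a misaligned
address through a packed struct makes the compiler emit safe sequences
(LWL/LWR and SWL/SWR on pre-R6 MIPS) instead of a plain word load that would
trap. A minimal standalone sketch of the same technique; the names
Unaligned64, load64, and store64 are illustrative, not the patch's:

#include <cstdint>

// Stand-in for the patch's Unaligned64Struct, with illustrative names.
struct Unaligned64 {
  uint64_t value;
  uint8_t dummy;  // To make the size non-power-of-two, as in the patch.
} __attribute__((packed));

inline uint64_t load64(const void *p) {
  // The packed struct tells GCC the member may be misaligned.
  return reinterpret_cast<const Unaligned64 *>(p)->value;
}

inline void store64(void *p, uint64_t v) {
  reinterpret_cast<Unaligned64 *>(p)->value = v;
}

int main() {
  unsigned char buf[16] = {0};
  store64(buf + 1, 0x0123456789abcdefULL);  // deliberately misaligned address
  return load64(buf + 1) == 0x0123456789abcdefULL ? 0 : 1;
}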
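The byteswap hunk swaps musl's generic <byteswap.h> macros for GCC's
builtins, which compile to single instructions (bswap on x86, rev on ARM)
where the target supports them. A quick standalone check, again illustrative
only, that the builtins match the classic bswap_xx semantics:

#include <cstdint>
#include <cstdio>

int main() {
  // Expected results shown in the comments.
  uint16_t a = __builtin_bswap16(0x1234);                  // 0x3412
  uint32_t b = __builtin_bswap32(0x12345678u);             // 0x78563412
  uint64_t c = __builtin_bswap64(0x0102030405060708ull);   // 0x0807060504030201
  std::printf("%04x %08x %016llx\n", a, b, static_cast<unsigned long long>(c));
  return !(a == 0x3412 && b == 0x78563412u && c == 0x0807060504030201ull);
}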