/*
* Detects 64 bits mode
*/
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
-	|| defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
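
CONFIG_64BIT is set by Kconfig on every 64-bit architecture, so the build system decides the word size instead of an open-coded list of compiler macros. LZ4_ARCH64 itself is only a width selector; a minimal sketch of the kind of consumer it feeds (STEPSIZE here is illustrative, not taken from this hunk):

/* Illustrative only: LZ4_ARCH64 typically just picks the copy width. */
#if LZ4_ARCH64
#define STEPSIZE 8	/* move 8 bytes per step on 64-bit builds */
#else
#define STEPSIZE 4	/* move 4 bytes per step on 32-bit builds */
#endif
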
#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+	(d = s - A16(p))
+
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
	do { \
		A16(p) = v; \
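
This hunk gives the efficient-unaligned-access branch its own LZ4_READ_LITTLEENDIAN_16: A16() is effectively a direct 16-bit load here, and on a little-endian host that load already yields the little-endian offset stored in the stream, so the macro only has to rewind the current pointer by that offset. A hedged sketch in function form (hypothetical name, not part of the patch):

/* Hypothetical function form of LZ4_READ_LITTLEENDIAN_16 in this branch. */
static inline const u8 *lz4_match_ptr(const u8 *s, const u8 *p)
{
	u16 offset = *(const u16 *)p;	/* direct load, roughly what A16() does here */
	return s - offset;		/* same effect as d = s - A16(p) */
}
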
#define PUT8(s, d) \
	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
-	do { \
-		put_unaligned(v, (u16 *)(p)); \
-		p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+	(d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+	do { \
+		put_unaligned_le16(v, (u16 *)(p)); \
+		p += 2; \
	} while (0)
#endif
#endif
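
In the branch without cheap unaligned access, the patch now leans on the kernel's unaligned helpers (historically from <asm/unaligned.h>): get_unaligned_le16() and put_unaligned_le16() handle both alignment and byte order, unlike the plain put_unaligned() call they replace. With each branch carrying its own LZ4_READ_LITTLEENDIAN_16, the single generic definition that used to follow the #endif is redundant and is removed below. Roughly, the helpers behave like this portable sketch (illustrative, not the kernel implementation):

/* Rough portable equivalents of the helpers used above (sketch only). */
static inline u16 sketch_get_unaligned_le16(const void *p)
{
	const u8 *b = p;
	return (u16)(b[0] | (b[1] << 8));	/* byte-wise read, no alignment assumed */
}

static inline void sketch_put_unaligned_le16(u16 v, void *p)
{
	u8 *b = p;
	b[0] = (u8)(v & 0xff);
	b[1] = (u8)(v >> 8);
}
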
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
-	(d = s - get_unaligned_le16(p))
-
#define LZ4_WILDCOPY(s, d, e) \
	do { \
		LZ4_COPYPACKET(s, d); \
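
LZ4_WILDCOPY, left unchanged in the trailing context above, is the usual LZ4 "wild copy": it moves data in fixed-size packets and may overrun the end pointer e by a few bytes, which is safe only because the caller reserves slack space past e. A conceptual sketch with a hypothetical helper name (u8 and memcpy as in <linux/types.h> and <linux/string.h>; COPYLENGTH is assumed to be the 8-byte packet size that LZ4_COPYPACKET moves):

/* Conceptual sketch of a wild copy (hypothetical helper, not from the patch). */
static inline void sketch_wildcopy(const u8 *s, u8 *d, const u8 *e)
{
	do {
		memcpy(d, s, COPYLENGTH);	/* copy one packet */
		s += COPYLENGTH;
		d += COPYLENGTH;
	} while (d < e);			/* may write slightly past e */
}
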