This is an automated email from the ASF dual-hosted git repository.
hellostephen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 15f410ee5e7 [Fix](mac) Fix macOS compile errors with BYTE_ORDER, BASE_HAS_ATOMIC64, and USE_UNWIND (#50997)
15f410ee5e7 is described below
commit 15f410ee5e7c1f1d744733b1f91435f4370cc947
Author: Jensen <[email protected]>
AuthorDate: Mon May 19 15:54:13 2025 +0800
[Fix](mac) Fix macOS compile errors with BYTE_ORDER, BASE_HAS_ATOMIC64, and USE_UNWIND (#50997)
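The commit message itself records no rationale; the note and snippet below are an illustrative reading of the diff, not part of the original commit.

On macOS there is no glibc <endian.h>, so __BYTE_ORDER and __LITTLE_ENDIAN are never defined, and each #if __BYTE_ORDER == __LITTLE_ENDIAN presumably breaks the build once -Wundef is treated as an error. The replacement macros __BYTE_ORDER__ and __ORDER_LITTLE_ENDIAN__ are predefined by both GCC and Clang on every platform, so the same branch is selected without depending on <endian.h>. The USE_UNWIND and BASE_HAS_ATOMIC64 guards are adjusted for the same reason: #if defined(USE_UNWIND) && USE_UNWIND and #ifdef BASE_HAS_ATOMIC64 stay quiet when the macro is simply not defined on macOS (note that #ifdef, unlike #if, would also take the branch if the macro were ever defined to 0, which is assumed not to happen here).

A minimal, self-contained sketch of the endianness check the diff switches to; it should compile cleanly with -Wundef -Werror on both Linux and macOS:

    // Uses only the compiler-predefined byte-order macros (GCC and Clang),
    // so no <endian.h> is required.
    #include <cstdio>

    int main() {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        std::puts("little-endian");
    #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        std::puts("big-endian");
    #else
        std::puts("unknown byte order");
    #endif
        return 0;
    }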
---
be/src/common/stack_trace.cpp | 4 ++--
be/src/gutil/atomic_refcount.h | 2 +-
be/src/util/byte_stream_split.cpp | 2 +-
be/src/util/coding.h | 16 ++++++++--------
4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/be/src/common/stack_trace.cpp b/be/src/common/stack_trace.cpp
index 19a585a908e..022ce90aa91 100644
--- a/be/src/common/stack_trace.cpp
+++ b/be/src/common/stack_trace.cpp
@@ -38,7 +38,7 @@
#include "vec/common/demangle.h"
#include "vec/common/hex.h"
-#if USE_UNWIND && defined(__x86_64__)
+#if defined(USE_UNWIND) && USE_UNWIND && defined(__x86_64__)
#include <libunwind.h>
#else
#include <execinfo.h>
@@ -299,7 +299,7 @@ StackTrace::StackTrace(const ucontext_t& signal_context) {
void StackTrace::tryCapture() {
// When unw_backtrace is not available, fall back on the standard
// `backtrace` function from execinfo.h.
-#if USE_UNWIND && defined(__x86_64__) // TODO
+#if defined(USE_UNWIND) && USE_UNWIND && defined(__x86_64__) // TODO
size = unw_backtrace(frame_pointers.data(), capacity);
#else
size = backtrace(frame_pointers.data(), capacity);
diff --git a/be/src/gutil/atomic_refcount.h b/be/src/gutil/atomic_refcount.h
index 9457d84dc68..b96788027de 100644
--- a/be/src/gutil/atomic_refcount.h
+++ b/be/src/gutil/atomic_refcount.h
@@ -94,7 +94,7 @@ inline bool RefCountIsZero(const volatile Atomic32* ptr) {
return subtle::Acquire_Load(ptr) == 0;
}
-#if BASE_HAS_ATOMIC64
+#ifdef BASE_HAS_ATOMIC64
// Implementations for Atomic64, if available.
inline void RefCountIncN(volatile base::subtle::Atomic64* ptr,
base::subtle::Atomic64 increment) {
DCHECK_GT(increment, 0);
diff --git a/be/src/util/byte_stream_split.cpp b/be/src/util/byte_stream_split.cpp
index 23f6ad4fc6d..94d112f6dab 100644
--- a/be/src/util/byte_stream_split.cpp
+++ b/be/src/util/byte_stream_split.cpp
@@ -38,7 +38,7 @@ inline void do_merge_streams(const uint8_t** src_streams, int width, int64_t nva
for (int i = 0; i < kBlockSize; i += 8) {
uint64_t v;
std::memcpy(&v, src + i, sizeof(v));
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
dest[stream + i * width] = static_cast<uint8_t>(v);
dest[stream + (i + 1) * width] = static_cast<uint8_t>(v >> 8);
dest[stream + (i + 2) * width] = static_cast<uint8_t>(v >> 16);
diff --git a/be/src/util/coding.h b/be/src/util/coding.h
index ee9ade4729f..3368174f5bf 100644
--- a/be/src/util/coding.h
+++ b/be/src/util/coding.h
@@ -28,7 +28,7 @@ inline void encode_fixed8(uint8_t* buf, uint8_t val) {
}
inline void encode_fixed16_le(uint8_t* buf, uint16_t val) {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
memcpy(buf, &val, sizeof(val));
#else
uint16_t res = bswap_16(val);
@@ -37,7 +37,7 @@ inline void encode_fixed16_le(uint8_t* buf, uint16_t val) {
}
inline void encode_fixed32_le(uint8_t* buf, uint32_t val) {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
memcpy(buf, &val, sizeof(val));
#else
uint32_t res = bswap_32(val);
@@ -46,7 +46,7 @@ inline void encode_fixed32_le(uint8_t* buf, uint32_t val) {
}
inline void encode_fixed64_le(uint8_t* buf, uint64_t val) {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
memcpy(buf, &val, sizeof(val));
#else
uint64_t res = gbswap_64(val);
@@ -55,7 +55,7 @@ inline void encode_fixed64_le(uint8_t* buf, uint64_t val) {
}
inline void encode_fixed128_le(uint8_t* buf, uint128_t val) {
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
memcpy(buf, &val, sizeof(val));
#else
uint128_t res = gbswap_128(val);
@@ -70,7 +70,7 @@ inline uint8_t decode_fixed8(const uint8_t* buf) {
inline uint16_t decode_fixed16_le(const uint8_t* buf) {
uint16_t res;
memcpy(&res, buf, sizeof(res));
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return res;
#else
return bswap_16(res);
@@ -80,7 +80,7 @@ inline uint16_t decode_fixed16_le(const uint8_t* buf) {
inline uint32_t decode_fixed32_le(const uint8_t* buf) {
uint32_t res;
memcpy(&res, buf, sizeof(res));
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return res;
#else
return bswap_32(res);
@@ -90,7 +90,7 @@ inline uint32_t decode_fixed32_le(const uint8_t* buf) {
inline uint64_t decode_fixed64_le(const uint8_t* buf) {
uint64_t res;
memcpy(&res, buf, sizeof(res));
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return res;
#else
return gbswap_64(res);
@@ -100,7 +100,7 @@ inline uint64_t decode_fixed64_le(const uint8_t* buf) {
inline uint128_t decode_fixed128_le(const uint8_t* buf) {
uint128_t res;
memcpy(&res, buf, sizeof(res));
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
return res;
#else
return gbswap_128(res);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]