The VMSTATE_UINTTL* macros should be used only in files compiled with knowledge of the virtual address size of a specific CPU. Currently this is achieved by guarding the VMSTATE_UINTTL* definitions with "#ifdef NEED_CPU_H" in hw/hw.h, but it is more appropriate to move these macros to the cpu-defs.h header.
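For illustration only (not part of the patch): a minimal sketch of how a target-specific file, i.e. one compiled per CPU target such as a target-*/machine.c, might use these macros. The CPU state type and field names here are hypothetical.

/* Hypothetical example; the struct and field names are invented.
 * The file must be target-specific so that target_ulong and
 * TARGET_LONG_BITS are known at compile time.
 */
#include "cpu.h"        /* after this patch, pulls in cpu-defs.h, which
                           defines VMSTATE_UINTTL* for this target     */

typedef struct HypotheticalCPUState {
    target_ulong pc;        /* 32 or 64 bits, depending on the target  */
    target_ulong gpr[16];   /* general-purpose registers               */
} HypotheticalCPUState;

static const VMStateDescription vmstate_hypothetical_cpu = {
    .name = "hypothetical-cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* Expands to VMSTATE_UINT32* or VMSTATE_UINT64* according to
         * TARGET_LONG_BITS of the target being built. */
        VMSTATE_UINTTL(pc, HypotheticalCPUState),
        VMSTATE_UINTTL_ARRAY(gpr, HypotheticalCPUState, 16),
        VMSTATE_END_OF_LIST()
    }
};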
Signed-off-by: Igor Mitsyanko <i.mitsya...@samsung.com>
---
 cpu-defs.h |   13 +++++++++++++
 hw/hw.h    |   19 -------------------
 2 files changed, 13 insertions(+), 19 deletions(-)

diff --git a/cpu-defs.h b/cpu-defs.h
index 4527cbf..10516e9 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -30,6 +30,8 @@
 #include "osdep.h"
 #include "qemu-queue.h"
 #include "targphys.h"
+#include "ioport.h"
+#include "vmstate.h"
 
 #ifndef TARGET_LONG_BITS
 #error TARGET_LONG_BITS must be defined before including this header
@@ -50,12 +52,20 @@ typedef uint32_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
 #define TARGET_FMT_lx "%08x"
 #define TARGET_FMT_ld "%d"
 #define TARGET_FMT_lu "%u"
+#define VMSTATE_UINTTL_V(_f, _s, _v) \
+    VMSTATE_UINT32_V(_f, _s, _v)
+#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \
+    VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v)
 #elif TARGET_LONG_SIZE == 8
 typedef int64_t target_long __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
 typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
 #define TARGET_FMT_lx "%016" PRIx64
 #define TARGET_FMT_ld "%" PRId64
 #define TARGET_FMT_lu "%" PRIu64
+#define VMSTATE_UINTTL_V(_f, _s, _v) \
+    VMSTATE_UINT64_V(_f, _s, _v)
+#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \
+    VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v)
 #else
 #error TARGET_LONG_SIZE undefined
 #endif
@@ -68,6 +78,9 @@ typedef uint64_t target_ulong __attribute__((aligned(TARGET_LONG_ALIGNMENT)));
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
+#define VMSTATE_UINTTL(_f, _s) VMSTATE_UINTTL_V(_f, _s, 0)
+#define VMSTATE_UINTTL_ARRAY(_f, _s, _n) VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, 0)
+
 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
    addresses on the same page.  The top bits are the same.  This allows
    TLB invalidation to quickly clear a subset of the hash table.  */
diff --git a/hw/hw.h b/hw/hw.h
index e5cb9bf..fb66156 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -46,23 +46,4 @@ typedef int QEMUBootSetHandler(void *opaque, const char *boot_devices);
 void qemu_register_boot_set(QEMUBootSetHandler *func, void *opaque);
 int qemu_boot_set(const char *boot_devices);
 
-#ifdef NEED_CPU_H
-#if TARGET_LONG_BITS == 64
-#define VMSTATE_UINTTL_V(_f, _s, _v) \
-    VMSTATE_UINT64_V(_f, _s, _v)
-#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \
-    VMSTATE_UINT64_ARRAY_V(_f, _s, _n, _v)
-#else
-#define VMSTATE_UINTTL_V(_f, _s, _v) \
-    VMSTATE_UINT32_V(_f, _s, _v)
-#define VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, _v) \
-    VMSTATE_UINT32_ARRAY_V(_f, _s, _n, _v)
-#endif
-#define VMSTATE_UINTTL(_f, _s) \
-    VMSTATE_UINTTL_V(_f, _s, 0)
-#define VMSTATE_UINTTL_ARRAY(_f, _s, _n) \
-    VMSTATE_UINTTL_ARRAY_V(_f, _s, _n, 0)
-
-#endif
-
 #endif
-- 
1.7.4.1