Bastian Koppelmann writes: > There is no overview that shows all the frontend operations one can use, as > found on the wiki. Unfortunately the wiki is out of date, so let's try to move > this documentation to the source files, which has the benefit that it is easy > to update the documentation if the frontend is changed. This patch adds > doxygen > tags to all the 32 bit versions of the tcg frontend operations, because the > 64 bit versions would mostly have the same documentation, and to all the type > conversion operations. The file tag has a note that makes the user aware of > the missing 64 bit operations. In this version all the immediate variants are also > documented by simply referring to the non-immediate version. However I'm > willing > to drop that.
> Any comments? The operations (or at least most of them) are already documented in tcg/README. If this change is accepted, I'd rather move the contents of the README file into here. Also, AFAIR it was decided to use gtk-doc instead of doxygen. Cheers, Lluis > Signed-off-by: Bastian Koppelmann <kbast...@mail.uni-paderborn.de> > --- > tcg/tcg-op.h | 477 > ++++++++++++++++++++++++++++++++++++++++++++++++++++++----- > 1 file changed, 436 insertions(+), 41 deletions(-) > diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h > index 96adf9a..5bb7e65 100644 > --- a/tcg/tcg-op.h > +++ b/tcg/tcg-op.h > @@ -26,6 +26,29 @@ > #include "exec/helper-proto.h" > #include "exec/helper-gen.h" > +/** > + * @file tcg-op.h > + * @brief These are the supported operations as implemented by the TCG > frontend > + * for the target cpu (what QEMU executes; not where QEMU executes). > + * This information is useful for people who want to port QEMU to > + * emulate a new processor. > + * The frontend helpers for generating TCG operations typically take > the > + * form: tcg_gen_<op>[i]_<reg_size>. > + * - The <op> is the TCG operation that will be generated for its > + * arguments > + * - The [i] suffix is used to indicate the TCG operation takes an > + * immediate rather than a normal register. > + * - The <reg_size> refers to the size of the TCG registers in use. > + * The vast majority of the time, this will match the native size > of > + * the emulated target, so rather than force people to type i32 or > + * i64 all the time, the shorthand tl is made available for all > + * helpers. > + * @note This documentation only covers the 32 bit operations. If not > stated > + * otherwise, every operation is also available as a 64 bit > operation. > + * If no suffix <reg_size> is given, the operation is available for > 32 > + * and 64 bit guests, e.g. tcg_gen_br. > + */ > + > /* Basic output routines. Not for general consumption. */ > void tcg_gen_op1(TCGContext *, TCGOpcode, TCGArg); > @@ -251,13 +274,24 @@ static inline void tcg_gen_op6ii_i64(TCGOpcode opc, > TCGv_i64 a1, TCGv_i64 a2, > /* Generic ops. */ > +/** > + * Create a new label, used for branch instructions. > + * \sa tcg_gen_br > + */ > int gen_new_label(void); > - > +/** > + * Label the current location with label n, so branch instructions can jump > + * there. > + * \sa tcg_gen_br, gen_new_label > + */ > static inline void gen_set_label(int n) > { > tcg_gen_op1(&tcg_ctx, INDEX_op_set_label, n); > } > - > +/** > + * Jump to a label. > + * \sa gen_new_label, gen_set_label > + */ > static inline void tcg_gen_br(int label) > { > tcg_gen_op1(&tcg_ctx, INDEX_op_br, label); > @@ -267,163 +301,386 @@ static inline void tcg_gen_br(int label) > /* Helper calls. */ > /* 32 bit ops */ > - > +/** > + * \sa tcg_gen_add_i32 > + */ > void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); > +/** > + * Subtracts 32 bit register arg2 from constant arg1. 
> + * \sa tcg_gen_sub_i32 > + */ > void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2); > +/** > + * \sa tcg_gen_sub_i32 > + */ > void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); > +/** > + * \sa tcg_gen_and_i32 > + */ > void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2); > +/** > + * \sa tcg_gen_or_i32 > + */ > void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); > +/** > + * \sa tcg_gen_xor_i32 > + */ > void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); > +/** > + * \sa tcg_gen_shl_i32 > + */ > void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); > +/** > + * \sa tcg_gen_shr_i32 > + */ > void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); > +/** > + * \sa tcg_gen_sar_i32 > + */ > void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); > +/** > + * \sa tcg_gen_mul_i32 > + */ > void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2); > +/** > + * ret = arg1/arg2 (signed). > + * Undefined behavior if division by zero or overflow. > + */ > void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = arg1\%arg2 (signed). > + * Undefined behavior if division by zero or overflow. > + */ > void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = arg1/arg2 (unsigned). Undefined behavior if division by zero. > + */ > void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = arg1\%arg2 (unsigned). Undefined behavior if division by zero. > + */ > void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = arg1 & ~arg2. > + */ > void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = ~(arg1 ^ arg2), or equivalently ret = arg1 ^ ~arg2. > + */ > void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = ~(arg1 & arg2). > + */ > void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = ~(arg1 | arg2). > + */ > void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * ret = arg1 | ~arg2. > + */ > void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * Rotate arg1 left by arg2 bits. > + * Unspecified behavior if arg2 < 0 or arg2 >= 32 (resp. 64). > + */ > void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); > +/** > + * Rotate arg1 right by arg2 bits. > + * Unspecified behavior if arg2 < 0 or arg2 >= 32 (resp. 64). > + */ > void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); > void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2); > +/** > + * Deposit arg2 as a bitfield into arg1, placing the result in ret. > + * The bitfield is described by ofs/len, which are immediate values: > + * > + * @param ofs the position of the first bit, counting from the LSB > + * @param len the length of the bitfield > + * > + * For example, ofs=8, len=4 indicates a 4-bit field at bit 8. > + * This operation would be equivalent to > + * > + * ret = (arg1 & ~0x0f00) | ((arg2 << 8) & 0x0f00) > + */ > void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, > unsigned int ofs, unsigned int len); > +/** > + * Conditional jump to label if arg1 cond arg2 is true.
> + * @param cond may be: > + * - TCG_COND_EQ > + * - TCG_COND_NE > + * - TCG_COND_LT (signed) > + * - TCG_COND_GE (signed) > + * - TCG_COND_LE (signed) > + * - TCG_COND_GT (signed) > + * - TCG_COND_LTU (unsigned) > + * - TCG_COND_GEU (unsigned) > + * - TCG_COND_LEU (unsigned) > + * - TCG_COND_GTU (unsigned) > + */ > void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, int > label); > +/** > + * \sa tcg_gen_brcond_i32 > + */ > void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, int > label); > +/** > + * ret = (arg1 cond arg2) > + * @param cond may be: > + * - TCG_COND_EQ > + * - TCG_COND_NE > + * - TCG_COND_LT (signed) > + * - TCG_COND_GE (signed) > + * - TCG_COND_LE (signed) > + * - TCG_COND_GT (signed) > + * - TCG_COND_LTU (unsigned) > + * - TCG_COND_GEU (unsigned) > + * - TCG_COND_LEU (unsigned) > + * - TCG_COND_GTU (unsigned) > + */ > void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret, > TCGv_i32 arg1, TCGv_i32 arg2); > +/** > + * \sa tcg_gen_setcond_i32 > + */ > void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret, > TCGv_i32 arg1, int32_t arg2); > +/** > + * ret = (c1 cond c2 ? v1 : v2) > + * @param cond may be: > + * - TCG_COND_EQ > + * - TCG_COND_NE > + * - TCG_COND_LT (signed) > + * - TCG_COND_GE (signed) > + * - TCG_COND_LE (signed) > + * - TCG_COND_GT (signed) > + * - TCG_COND_LTU (unsigned) > + * - TCG_COND_GEU (unsigned) > + * - TCG_COND_LEU (unsigned) > + * - TCG_COND_GTU (unsigned) > + */ > void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, > TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2); > +/** > + * Similar to add, except that the double-word inputs A and B are > + * formed from two single-word arguments (al|ah and bl|bh), and the > + * double-word output R is returned in two single-word outputs (rl|rh). > + */ > void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, > TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); > +/** > + * Similar to sub, except that the double-word inputs A and B are > + * formed from two single-word arguments (al|ah and bl|bh), and the > + * double-word output R is returned in two single-word outputs (rl|rh). > + */ > void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, > TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh); > +/** > + * Similar to mul, except that the two unsigned inputs arg1 and arg2 yield the full > + * double-word product R. The latter is returned in two single-word > + * outputs (rl|rh). > + */ > void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 > arg2); > +/** > + * Similar to mulu2, except the two inputs arg1 and arg2 are signed.
> + * \sa tcg_gen_mulu2_i32 > + */ > void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 > arg2); > +/** > + * 8 bit sign extension (both operands must have the same type) > + */ > void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg); > +/** > + * 16 bit sign extension (both operands must have the same type) > + */ > void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg); > +/** > + * 8 bit zero extension (both operands must have the same type) > + */ > void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg); > +/** > + * 16 bit zero extension (both operands must have the same type) > + */ > void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg); > +/** > + * 16 bit byte swap on a 32/64 bit value. It assumes that the two/six high > + * order bytes are set to zero. > + */ > void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg); > +/** > + * 32 bit byte swap on a 32/64 bit value. With a 64 bit value, it assumes > that > + * the four high order bytes are set to zero. > + */ > void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg); > - > +/** > + * Indicate that the value of arg won't be used later. It is useful to > + * force dead code elimination. > + */ > static inline void tcg_gen_discard_i32(TCGv_i32 arg) > { > tcg_gen_op1_i32(INDEX_op_discard, arg); > } > - > +/** > + * ret = arg > + */ > static inline void tcg_gen_mov_i32(TCGv_i32 ret, TCGv_i32 arg) > { > if (!TCGV_EQUAL_I32(ret, arg)) { > tcg_gen_op2_i32(INDEX_op_mov_i32, ret, arg); > } > } > - > +/** > + * \sa tcg_gen_mov_i32 > + */ > static inline void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg) > { > tcg_gen_op2i_i32(INDEX_op_movi_i32, ret, arg); > } > - > +/** > + * Load 8 bits without sign extension from host memory. > + */ > static inline void tcg_gen_ld8u_i32(TCGv_i32 ret, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_ld8u_i32, ret, arg2, offset); > } > - > +/** > + * Load 8 bits with sign extension from host memory. > + */ > static inline void tcg_gen_ld8s_i32(TCGv_i32 ret, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_ld8s_i32, ret, arg2, offset); > } > - > +/** > + * Load 16 bits without sign extension from host memory. > + */ > static inline void tcg_gen_ld16u_i32(TCGv_i32 ret, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_ld16u_i32, ret, arg2, offset); > } > - > +/** > + * Load 16 bits with sign extension from host memory. > + */ > static inline void tcg_gen_ld16s_i32(TCGv_i32 ret, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_ld16s_i32, ret, arg2, offset); > } > - > +/** > + * Load 32 bits from host memory. > + */ > static inline void tcg_gen_ld_i32(TCGv_i32 ret, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_ld_i32, ret, arg2, offset); > } > - > +/** > + * Store 8 bits to host memory. > + */ > static inline void tcg_gen_st8_i32(TCGv_i32 arg1, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_st8_i32, arg1, arg2, offset); > } > - > +/** > + * Store 16 bits to host memory. > + */ > static inline void tcg_gen_st16_i32(TCGv_i32 arg1, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_st16_i32, arg1, arg2, offset); > } > - > +/** > + * Store 32 bits to host memory. 
> + */ > static inline void tcg_gen_st_i32(TCGv_i32 arg1, TCGv_ptr arg2, > tcg_target_long offset) > { > tcg_gen_ldst_op_i32(INDEX_op_st_i32, arg1, arg2, offset); > } > - > +/** > + * ret = arg1 + arg2 > + */ > static inline void tcg_gen_add_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_add_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 - arg2 > + */ > static inline void tcg_gen_sub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_sub_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 & arg2 > + */ > static inline void tcg_gen_and_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_and_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 | arg2 > + */ > static inline void tcg_gen_or_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) > { > tcg_gen_op3_i32(INDEX_op_or_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 ^ arg2 > + */ > static inline void tcg_gen_xor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_xor_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 << arg2. > + * Unspecified behavior if arg2 < 0 or arg2 >= 32 (resp 64) > + */ > static inline void tcg_gen_shl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_shl_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 >> arg2 (unsigned). > + * Unspecified behavior if arg2 < 0 or arg2 >= 32 (resp 64) > + */ > static inline void tcg_gen_shr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_shr_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 >> arg2 (signed). > + * Unspecified behavior if arg2 < 0 or arg2 >= 32 (resp 64) > + */ > static inline void tcg_gen_sar_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_sar_i32, ret, arg1, arg2); > } > - > +/** > + * ret = arg1 * arg2. > + */ > static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 > arg2) > { > tcg_gen_op3_i32(INDEX_op_mul_i32, ret, arg1, arg2); > } > - > +/** > + * ret = -arg (two's complement). > + */ > static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 arg) > { > if (TCG_TARGET_HAS_neg_i32) { > @@ -432,7 +689,9 @@ static inline void tcg_gen_neg_i32(TCGv_i32 ret, TCGv_i32 > arg) > tcg_gen_subfi_i32(ret, 0, arg); > } > } > - > +/** > + * ret = ~arg > + */ > static inline void tcg_gen_not_i32(TCGv_i32 ret, TCGv_i32 arg) > { > if (TCG_TARGET_HAS_not_i32) { > @@ -683,19 +942,44 @@ static inline void tcg_gen_neg_i64(TCGv_i64 ret, > TCGv_i64 arg) > } > /* Size changing operations. */ > - > +/** > + * Convert arg (32 bit) to ret (64 bit) and does zero extension. > + */ > void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg); > +/** > + * Convert arg (32 bit) to ret (64 bit) and does sign extension. > + */ > void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg); > +/** > + * Construct dest (64-bit) taking the low half from low (32 bit) and the > + * high half from high (32 bit). > + */ > void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high); > +/** > + * Right shift the 64-bit input arg by c and truncate to 32-bit output ret. > + */ > void tcg_gen_trunc_shr_i64_i32(TCGv_i32 ret, TCGv_i64 arg, unsigned int c); > +/** > + * Extracts the low half of arg (64 bit) into lo (32 bit) and the high half > + * into hi (32 bit). 
> + */ > void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg); > +/** > + * Extracts the low half of arg (64 bit) into lo (64 bit) and the high half > + * into hi (64 bit). Neither value is sign extended. > + */ > void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg); > - > +/** > + * Construct ret (64-bit) taking the low half from lo (64 bit) and the > + * high half from hi (64 bit). > + */ > static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 > hi) > { > tcg_gen_deposit_i64(ret, lo, hi, 32, 32); > } > - > +/** > + * Truncate arg (64 bit) to ret (32 bit). > + */ > static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg) > { > tcg_gen_trunc_shr_i64_i32(ret, arg, 0); > @@ -718,12 +1002,25 @@ static inline void tcg_gen_debug_insn_start(uint64_t > pc) > tcg_gen_op1i(INDEX_op_debug_insn_start, pc); > #endif > } > - > +/** > + * Exit translation block. > + * @param val may be 0, or the TB address ORed with the index of the taken jump > + * slot. If you call tcg_gen_exit_tb(0), chaining will not happen and > + * a new TB will be looked up based on the CPU state. > + * \sa tcg_gen_goto_tb > + */ > static inline void tcg_gen_exit_tb(uintptr_t val) > { > tcg_gen_op1i(INDEX_op_exit_tb, val); > } > - > +/** > + * Every TB can use goto_tb to jump to at most two different destinations. There are > two > + * jump slots; tcg_gen_goto_tb takes a jump slot index, 0 or 1, as its argument. > + * These jumps will only take place if the TBs get chained; you need to call > + * tcg_gen_exit_tb with (tb | index) for that to ever happen. > tcg_gen_goto_tb > + * may be issued at most once with each slot index per TB. > + * \sa tcg_gen_exit_tb > + */ > void tcg_gen_goto_tb(unsigned idx); > #if TARGET_LONG_BITS == 32 > @@ -752,61 +1049,159 @@ void tcg_gen_goto_tb(unsigned idx); > #define tcg_gen_qemu_st_tl tcg_gen_qemu_st_i64 > #endif > +/** > + * Load an 8/16/32/64 bit quantity into TCGv_i32 from target memory at address > TCGv, > + * depending on TCGMemOp. > + * @param TCGMemOp may be: > + * - MO_UB (8 bit unsigned) > + * - MO_UW (16 bit unsigned) > + * - MO_UL (32 bit unsigned) > + * - MO_SB (8 bit signed) > + * - MO_SW (16 bit signed) > + * - MO_SL (32 bit signed) > + * - MO_Q (64 bit) > + * - MO_LEUW (16 bit unsigned, little endian) > + * - MO_LEUL (32 bit unsigned, little endian) > + * - MO_LESW (16 bit signed, little endian) > + * - MO_LESL (32 bit signed, little endian) > + * - MO_LEQ (64 bit, little endian) > + * - MO_BEUW (16 bit unsigned, big endian) > + * - MO_BEUL (32 bit unsigned, big endian) > + * - MO_BESW (16 bit signed, big endian) > + * - MO_BESL (32 bit signed, big endian) > + * - MO_BEQ (64 bit, big endian) > + * - MO_TEUW (16 bit unsigned, target endian) > + * - MO_TEUL (32 bit unsigned, target endian) > + * - MO_TESW (16 bit signed, target endian) > + * - MO_TESL (32 bit signed, target endian) > + * - MO_TEQ (64 bit, target endian) > + */ > void tcg_gen_qemu_ld_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); > +/** > + * Store an 8/16/32/64 bit quantity from TCGv_i32 to target memory at address > TCGv, > + * depending on TCGMemOp.
> + * @param TCGMemOp may be: > + * - MO_UB (8 bit unsigned) > + * - MO_UW (16 bit unsigned) > + * - MO_UL (32 bit unsigned) > + * - MO_SB (8 bit signed) > + * - MO_SW (16 bit signed) > + * - MO_SL (32 bit signed) > + * - MO_Q (64 bit) > + * - MO_LEUW (16 bit unsigned, little endian) > + * - MO_LEUL (32 bit unsigned, little endian) > + * - MO_LESW (16 bit signed, little endian) > + * - MO_LESL (32 bit signed, little endian) > + * - MO_LEQ (64 bit, little endian) > + * - MO_BEUW (16 bit unsigned, big endian) > + * - MO_BEUL (32 bit unsigned, big endian) > + * - MO_BESW (16 bit signed, big endian) > + * - MO_BESL (32 bit signed, big endian) > + * - MO_BEQ (64 bit, big endian) > + * - MO_TEUW (16 bit unsigned, target endian) > + * - MO_TEUL (32 bit unsigned, target endian) > + * - MO_TESW (16 bit signed, target endian) > + * - MO_TESL (32 bit signed, target endian) > + * - MO_TEQ (64 bit, target endian) > + */ > void tcg_gen_qemu_st_i32(TCGv_i32, TCGv, TCGArg, TCGMemOp); > void tcg_gen_qemu_ld_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); > void tcg_gen_qemu_st_i64(TCGv_i64, TCGv, TCGArg, TCGMemOp); > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load an 8 bit quantity from target memory and zero extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load an 8 bit quantity from target memory and sign extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load a 16 bit quantity from target memory and zero extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load a 16 bit quantity from target memory and sign extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load a 32 bit quantity from target memory and zero extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load a 32 bit quantity from target memory and sign extend. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_ld_i32. > + * Load a 64 bit quantity from target memory. > + * \sa tcg_gen_qemu_ld_i32 > + */ > static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index) > { > tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_st_i32. > + * Store an 8 bit quantity to target memory. > + * \sa tcg_gen_qemu_st_i32 > + */ > static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index) > { > tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_st_i32. 
> + * Store a 16 bit quantity to target memory. > + * \sa tcg_gen_qemu_st_i32 > + */ > static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index) > { > tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_st_i32. > + * Store a 32 bit quantity to target memory. > + * \sa tcg_gen_qemu_st_i32 > + */ > static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index) > { > tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL); > } > - > +/** > + * \deprecated Please use tcg_gen_qemu_st_i64. > + * Store a 64 bit quantity to target memory. > + * \sa tcg_gen_qemu_st_i64 > + */ > static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index) > { > tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ); > } > -- > 2.2.2 -- "And it's much the same thing with knowledge, for whenever you learn something new, the whole world becomes that much richer." -- The Princess of Pure Reason, as told by Norton Juster in The Phantom Tollbooth
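For readers new to the TCG frontend, here is a minimal sketch of how the helpers documented in the quoted patch are typically combined in a target's translate.c. It is illustrative only and assumes a 32-bit guest (so TCGv and TCGv_i32 coincide); cpu_regs[], cpu_cond, DisasContext with its mem_idx field, gen_ldadd_cond, and the rd/rs/ra operands are hypothetical names standing in for whatever the target frontend actually defines.

/* Illustrative sketch, not part of the patch: translate a hypothetical
 * guest instruction "load a 32-bit word and add it to rs if the condition
 * register is non-zero".  Assumes a 32-bit guest, so TCGv == TCGv_i32,
 * and hypothetical TCGv_i32 globals cpu_regs[]/cpu_cond set up elsewhere
 * in the target frontend. */
#include "tcg-op.h"

static void gen_ldadd_cond(DisasContext *ctx, int rd, int rs, int ra)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    int skip = gen_new_label();               /* create a forward label */

    /* Skip the whole operation if the condition register is zero. */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_cond, 0, skip);

    /* 32-bit target-endian load from the guest address in cpu_regs[ra]. */
    tcg_gen_qemu_ld_i32(tmp, cpu_regs[ra], ctx->mem_idx, MO_TEUL);

    /* rd = rs + loaded value */
    tcg_gen_add_i32(cpu_regs[rd], cpu_regs[rs], tmp);

    gen_set_label(skip);                      /* branch target */
    tcg_temp_free_i32(tmp);
}

The same pattern carries over to the _i64 and _tl variants; only the type suffix of the helpers changes.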