The logic is the same, but majority(NOT a, b, c) is factored out into a separate macro and implemented without NOT operations.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 target/i386/hvf/x86_flags.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/target/i386/hvf/x86_flags.c b/target/i386/hvf/x86_flags.c
index fedc70a1b80..60ab4f01a20 100644
--- a/target/i386/hvf/x86_flags.c
+++ b/target/i386/hvf/x86_flags.c
@@ -45,14 +45,30 @@
 #define LF_MASK_CF     (0x01 << LF_BIT_CF)
 #define LF_MASK_PO     (0x01 << LF_BIT_PO)
 
+/* majority(NOT a, b, c) = (a ^ b) ? b : c */
+#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))
+
+/*
+ * ADD_COUT_VEC(x, y) = majority((x + y) ^ x ^ y, x, y)
+ *
+ * If two corresponding bits in x and y are the same, that's the carry
+ * independent of the value (x+y)^x^y.  Hence x^y can be replaced with
+ * 1 in (x+y)^x^y, resulting in majority(NOT (x+y), x, y)
+ */
 #define ADD_COUT_VEC(op1, op2, result) \
-    (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
+    MAJ_INV1(result, op1, op2)
 
+/*
+ * SUB_COUT_VEC(x, y) = NOT majority(x, NOT y, (x - y) ^ x ^ NOT y)
+ *                    = majority(NOT x, y, (x - y) ^ x ^ y)
+ *
+ * Note that the carry out is actually a borrow, i.e. it is inverted.
+ * If two corresponding bits in x and y are different, the value of the
+ * bit in (x-y)^x^y likewise does not matter.  Hence, x^y can be replaced
+ * with 0 in (x-y)^x^y, resulting in majority(NOT x, y, x-y)
+ */
 #define SUB_COUT_VEC(op1, op2, result) \
-    (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))
-
-#define GET_ADD_OVERFLOW(op1, op2, result, mask) \
-    ((((op1) ^ (result)) & ((op2) ^ (result))) & (mask))
+    MAJ_INV1(op1, op2, result)
 
 /* ******************* */
 /* OSZAPC              */
-- 
2.49.0
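
For reference (not part of the patch): a minimal standalone sanity check along the following lines can confirm that the MAJ_INV1-based carry-out vectors match the old expressions bit-for-bit. The file name, build command, and 8-bit operand width are only illustrative; the macros are copied from the old and new definitions above.

/*
 * check_cout.c: exhaustively compare the new MAJ_INV1-based carry-out
 * vectors against the expressions they replace, for all 8-bit operand
 * pairs.  Build with e.g. "cc -o check_cout check_cout.c".
 */
#include <stdint.h>
#include <stdio.h>

#define MAJ_INV1(a, b, c) ((((a) ^ (b)) & ((b) ^ (c))) ^ (c))

/* New definitions from the patch. */
#define ADD_COUT_VEC(op1, op2, result) MAJ_INV1(result, op1, op2)
#define SUB_COUT_VEC(op1, op2, result) MAJ_INV1(op1, op2, result)

/* Old definitions being replaced. */
#define OLD_ADD_COUT_VEC(op1, op2, result) \
    (((op1) & (op2)) | (((op1) | (op2)) & (~(result))))
#define OLD_SUB_COUT_VEC(op1, op2, result) \
    (((~(op1)) & (op2)) | (((~(op1)) ^ (op2)) & (result)))

int main(void)
{
    for (int x = 0; x < 256; x++) {
        for (int y = 0; y < 256; y++) {
            uint8_t sum = x + y;   /* truncated, as in the flag helpers */
            uint8_t diff = x - y;
            if ((uint8_t)ADD_COUT_VEC(x, y, sum) !=
                (uint8_t)OLD_ADD_COUT_VEC(x, y, sum) ||
                (uint8_t)SUB_COUT_VEC(x, y, diff) !=
                (uint8_t)OLD_SUB_COUT_VEC(x, y, diff)) {
                printf("mismatch for x=%d y=%d\n", x, y);
                return 1;
            }
        }
    }
    printf("all 8-bit operand pairs match\n");
    return 0;
}

Since both forms are purely bitwise, agreement on every bit position of 8-bit operands carries over to the 16/32/64-bit widths used by the flag code.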