Discussion:
vex: r3305 - /trunk/priv/guest_amd64_helpers.c
(too old to reply)
s***@valgrind.org
2017-02-21 15:08:29 UTC
Permalink
Raw Message
Author: sewardj
Date: Tue Feb 21 15:08:28 2017
New Revision: 3305

Log:
Push some spec rules for amd64 that have been sitting around for a while:

amd64g_calculate_condition:
S and NS after SUBW
Z and NZ after SHRQ
NZ after SHRL (Z after SHRL was already present)

amd64g_calculate_rflags_c:
C after ADDQ
C after ADDL

At least the first 5 reduce the Memcheck noise level from running
optimised code compiled by Clang.


Modified:
trunk/priv/guest_amd64_helpers.c

Modified: trunk/priv/guest_amd64_helpers.c
==============================================================================
--- trunk/priv/guest_amd64_helpers.c (original)
+++ trunk/priv/guest_amd64_helpers.c Tue Feb 21 15:08:28 2017
@@ -1353,6 +1353,34 @@
binop(Iop_Shl64, cc_dep2, mkU8(48))));
}

+ /* 8, 9 */
+ if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondS)
+ && isU64(cc_dep2, 0)) {
+ /* word sub/cmp of zero, then S --> test (dst-0 <s 0)
+ --> test dst <s 0
+ --> (ULong)dst[15]
+ This is yet another scheme by which clang figures out if the
+ top bit of a word is 1 or 0. See also LOGICB/CondS below. */
+ /* Note: isU64(cc_dep2, 0) is correct, even though this is
+ for a 16-bit comparison, since the args to the helper
+ function are always U64s. */
+ return binop(Iop_And64,
+ binop(Iop_Shr64,cc_dep1,mkU8(15)),
+ mkU64(1));
+ }
+ if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondNS)
+ && isU64(cc_dep2, 0)) {
+ /* word sub/cmp of zero, then NS --> test !(dst-0 <s 0)
+ --> test !(dst <s 0)
+ --> (ULong) !dst[15]
+ */
+ return binop(Iop_Xor64,
+ binop(Iop_And64,
+ binop(Iop_Shr64,cc_dep1,mkU8(15)),
+ mkU64(1)),
+ mkU64(1));
+ }
+
/* 14, */
if (isU64(cc_op, AMD64G_CC_OP_SUBW) && isU64(cond, AMD64CondLE)) {
/* word sub/cmp, then LE (signed less than or equal)
@@ -1604,6 +1632,19 @@
mkU64(0)));
}

+ /*---------------- SHRQ ----------------*/
+
+ if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondZ)) {
+ /* SHRQ, then Z --> test dep1 == 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpEQ64, cc_dep1, mkU64(0)));
+ }
+ if (isU64(cc_op, AMD64G_CC_OP_SHRQ) && isU64(cond, AMD64CondNZ)) {
+ /* SHRQ, then NZ --> test dep1 != 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpNE64, cc_dep1, mkU64(0)));
+ }
+
/*---------------- SHRL ----------------*/

if (isU64(cc_op, AMD64G_CC_OP_SHRL) && isU64(cond, AMD64CondZ)) {
@@ -1612,6 +1653,12 @@
binop(Iop_CmpEQ32, unop(Iop_64to32, cc_dep1),
mkU32(0)));
}
+ if (isU64(cc_op, AMD64G_CC_OP_SHRL) && isU64(cond, AMD64CondNZ)) {
+ /* SHRL, then NZ --> test dep1 != 0 */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpNE32, unop(Iop_64to32, cc_dep1),
+ mkU32(0)));
+ }

/*---------------- COPY ----------------*/
/* This can happen, as a result of amd64 FP compares: "comisd ... ;
@@ -1732,6 +1779,20 @@
binop(Iop_And64,cc_dep1,mkU64(0xFF)),
binop(Iop_And64,cc_dep2,mkU64(0xFF))));
}
+ if (isU64(cc_op, AMD64G_CC_OP_ADDQ)) {
+ /* C after add denotes sum <u either arg */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpLT64U,
+ binop(Iop_Add64, cc_dep1, cc_dep2),
+ cc_dep1));
+ }
+ if (isU64(cc_op, AMD64G_CC_OP_ADDL)) {
+ /* C after add denotes sum <u either arg */
+ return unop(Iop_1Uto64,
+ binop(Iop_CmpLT32U,
+ unop(Iop_64to32, binop(Iop_Add64, cc_dep1, cc_dep2)),
+ unop(Iop_64to32, cc_dep1)));
+ }
if (isU64(cc_op, AMD64G_CC_OP_LOGICQ)
|| isU64(cc_op, AMD64G_CC_OP_LOGICL)
|| isU64(cc_op, AMD64G_CC_OP_LOGICW)

Loading...