/*
 * Theory "bitvectors-cast": a constrained rewrite system over 32-bit
 * bitvectors, discharged through an external SMT solver in logic QF_UFBV.
 * Each rule is  lhs -> rhs [ constraint ] ;  where the bracketed part is a
 * side condition over bitvector terms (`.32` marks 32-bit operations;
 * bv32"#x...." are 32-bit hex constants; /\ is conjunction).
 * The trailing comments name the LLVM InstCombine transform family each
 * rule was taken from (e.g. "AddSub:1334" cites an InstCombine source line).
 * NOTE(review): layout below is reflowed to one rule per line for
 * readability; every non-comment token is unchanged from the original.
 */
THEORY bitvectors-cast; LOGIC QF_UFBV; SOLVER external;

/* Declared function symbols; !BITVECTOR presumably pulls in the builtin
 * bitvector theory symbols — confirm against the tool's input grammar. */
SIGNATURE Xor, lshr, sub_nuw, select, icmp_eq, sub, sub_nsw, add_nuw, add, shl_nuw, mul, srem, add_nsw, urem, icmp_ne, icmp, Or, And, sdiv, lshr_exact, udiv, shl, !BITVECTOR;

RULES
/* --- AddSub family: add/sub strength adjustments and flag strengthening.
 * Uninterpreted predicates WillNotOverflow*(..) / MaskedValueIsZero(..) /
 * isPowerOf2(..) / hasOneUse model InstCombine's analysis facts. --- */
add(lhs, rhs) -> add_nsw(lhs, rhs) [ WillNotOverflowSignedAdd.32(lhs, rhs)] ; /* AddSub 1 ; AddSub:1334*/
add(lhs, rhs) -> add_nuw(lhs, rhs) [ WillNotOverflowUnsignedAdd.32(lhs, rhs)] ; /* AddSub 2*/
/* x - C  ==>  x + (-C), unconditionally */
sub(x, C) -> add(x, neg.32(C)); /* AddSub 3*/
/* same with nsw, guarded against C = INT_MIN (1 << (width-1)), where -C overflows */
sub_nsw(x, C) -> add_nsw(x, neg.32(C)) [ (C !=.32 (bv32"#x00000001" <<.32 (width.32(C) -.32 bv32"#x00000001")))] ; /* AddSub 4*/
sub(A, B) -> sub_nsw(A, B) [ WillNotOverflowSignedSub.32(A, B)] ; /* AddSub 5*/
sub(A, B) -> sub_nuw(A, B) [ WillNotOverflowUnsignedSub.32(A, B)] ; /* AddSub 6*/

/* --- AndOrXor family: mask simplifications. CL0 is the constant-folded
 * result pinned by the constraint (e.g. (C1 & C2) =.32 CL0). --- */
And(Xor(X, C1), C2) -> Xor(And(X, C2), CL0) [ ((C1 &.32 C2) =.32 CL0)] ; /* AndOrXor 1*/
And(Or(X, C1), C2) -> And(Or(X, CL0), C2) [ ((C1 &.32 C2) =.32 CL0)] ; /* AndOrXor 2*/
And(Or(X, C1), C2) -> Or(And(X, CL0), C1) [ (((C2 ^.32 (C1 &.32 C2)) =.32 CL0) /\ ((C1 &.32 C2) =.32 C1))] ; /* AndOrXor 3*/
/* shrink C2 to the bits a shl/lshr can actually produce; the !=  guards
 * keep the rule from firing when the mask is already tight (no-op) */
And(shl(X, C1), C2) -> And(shl(X, C1), CL0) [ (((C2 &.32 (bv32"#xffffffff" <<.32 C1)) =.32 CL0) /\ (((C2 &.32 (bv32"#xffffffff" <<.32 C1)) !=.32 (bv32"#xffffffff" <<.32 C1)) /\ ((C2 &.32 (bv32"#xffffffff" <<.32 C1)) !=.32 C2)))] ; /* AndOrXor 4*/
And(lshr(X, C1), C2) -> And((X >>u.32 C1), CL0) [ (((C2 &.32 (bv32"#xffffffff" >>u.32 C1)) =.32 CL0) /\ ((C2 &.32 (bv32"#xffffffff" >>u.32 C1)) !=.32 (bv32"#xffffffff" >>u.32 C1)))] ; /* AndOrXor 5*/
/* hoist And over Xor/Or when the other operand has no bits inside C */
And(Xor(op0LHS, op0RHS), C) -> Xor(And(op0LHS, C), op0RHS) [ MaskedValueIsZero.32(op0RHS, ~.32(C))] ; /* AndOrXor 6*/
And(Or(op0LHS, op0RHS), C) -> Or(And(op0LHS, C), op0RHS) [ MaskedValueIsZero.32(op0RHS, ~.32(C))] ; /* AndOrXor 7*/
And(sub(A, B), C) -> And(sub(bv32"#x00000000", B), C) [ MaskedValueIsZero.32(A, (bv32"#xffffffff" >>u.32 countLeadingZeros.32(C)))] ; /* AndOrXor 8*/
/* De Morgan: ~a & ~b ==> ~(a | b)   (Xor with all-ones is bitwise not) */
And(Xor(notOp0, bv32"#xffffffff"), Xor(notOp1, bv32"#xffffffff")) -> Xor(Or(notOp0, notOp1), bv32"#xffffffff"); /* AndOrXor 9*/
Or(And(x, C1), C) -> And(Or(x, C), CL0) [ (((C |.32 C1) =.32 CL0) /\ ((C &.32 C1) !=.32 bv32"#x00000000"))] ; /* AndOrXor 10*/
Or(Xor(A, C1), op1) -> Xor(Or(A, op1), C1) [ MaskedValueIsZero.32(op1, C1)] ; /* AndOrXor 11*/
/* De Morgan: ~A | ~B ==> ~(A & B) */
Or(Xor(A, bv32"#xffffffff"), Xor(B, bv32"#xffffffff")) -> Xor(And(A, B), bv32"#xffffffff"); /* AndOrXor 12*/
Or(Or(A, C1), op1) -> Or(Or(A, op1), C1); /* AndOrXor 13*/
/* De Morgan, pushed the other way: ~(x & y) ==> ~x | ~y ; ~(x | y) ==> ~x & ~y.
 * NOTE(review): 14/15 invert 9/12, so the "loops" query below plausibly
 * targets such rule pairs — confirm intent with the analysis owner. */
Xor(And(x, y), bv32"#xffffffff") -> Or(Xor(x, bv32"#xffffffff"), Xor(y, bv32"#xffffffff")); /* AndOrXor 14*/
Xor(Or(x, y), bv32"#xffffffff") -> And(Xor(x, bv32"#xffffffff"), Xor(y, bv32"#xffffffff")); /* AndOrXor 15*/
/* (a & b) ^ b ==> ~a & b */
Xor(And(a, op1), op1) -> And(Xor(a, bv32"#xffffffff"), op1); /* AndOrXor 16*/

/* --- MulDivRem family: add exact/nuw flags to a power-of-two divisor's
 * defining shift (1..8), and fold a negated power-of-two multiplier (9,10). --- */
udiv(X, lshr(A, B)) -> udiv(X, lshr_exact(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 1*/
udiv(X, shl(A, B)) -> udiv(X, shl_nuw(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 2*/
sdiv(X, lshr(A, B)) -> sdiv(X, lshr_exact(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 3*/
sdiv(X, shl(A, B)) -> sdiv(X, shl_nuw(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 4*/
urem(X, lshr(A, B)) -> urem(X, lshr_exact(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 5*/
urem(X, shl(A, B)) -> urem(X, shl_nuw(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 6*/
srem(X, lshr(A, B)) -> srem(X, lshr_exact(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 7*/
srem(X, shl(A, B)) -> srem(X, shl_nuw(A, B)) [ (isPowerOf2.32(A) /\ hasOneUse)] ; /* MulDivRem 8*/
/* (Y - X) * C ==> (X - Y) * |C| when C < 0 and |C| is a power of two */
mul(sub(Y, X), C) -> mul(sub(X, Y), CL0) [ ((abs.32(C) =.32 CL0) /\ ((C <s.32 bv32"#x00000000") /\ isPowerOf2.32(abs.32(C))))] ; /* MulDivRem 9*/
/* (Y + C1) * C ==> (-C1 - Y) * |C| under the same sign/power-of-two guard */
mul(add(Y, C1), C) -> mul(sub(neg.32(C1), Y), CL0) [ ((abs.32(C) =.32 CL0) /\ ((C <s.32 bv32"#x00000000") /\ isPowerOf2.32(abs.32(C))))] ; /* MulDivRem 10*/

/* --- Select family: replace the arm known equal to C by C itself. --- */
select(icmp_eq(X, C), X, Y) -> select(icmp_eq(X, C), C, Y); /* Select 1*/
select(icmp_ne(X, C), Y, X) -> select(icmp_ne(X, C), Y, C); /* Select 2*/

/* --- Shift family: shl gains nuw when the shifted-out high bits are zero. --- */
shl(Op0, C) -> shl_nuw(Op0, C) [ MaskedValueIsZero.32(Op0, (bv32"#xffffffff" <<.32 (width.32(C) -.32 C)))] ; /* Shift 1*/

/* --- Flag/predicate erasure: every decorated operation may rewrite to its
 * plain form. These make the flag-adding rules above immediately reversible,
 * which is exactly the kind of cycle a loop query would surface. --- */
sub_nsw(X0, X1) -> sub(X0, X1); /**/
icmp_ne(X0, X1) -> icmp(X0, X1); /**/
shl_nuw(X0, X1) -> shl(X0, X1); /**/
add_nuw(X0, X1) -> add(X0, X1); /**/
add_nsw(X0, X1) -> add(X0, X1); /**/
sub_nuw(X0, X1) -> sub(X0, X1); /**/
icmp_eq(X0, X1) -> icmp(X0, X1); /**/
lshr_exact(X0, X1) -> lshr(X0, X1); /**/

/* NOTE(review): NON-STANDARD and IRREGULAR appear to be analysis-mode flags
 * and "QUERY loops" a nontermination (loop-finding) query over the rules
 * above — confirm both against the tool's input grammar. */
NON-STANDARD IRREGULAR QUERY loops