def x86_or(ctx, i):
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1, a.size)

    size = min(a.size, b.size)
    result = ctx.tmp(size)

    ctx.emit( or_ (a, b, result))

    _logic_set_flags(ctx, result)

    operand.set(ctx, i, 0, result)
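
# Body of a per-element loop from one of the packed (SSE) helpers; the enclosing
# "for (a_part, b_part) in zip(...)" loop, the unpacking of the source operands
# and the temporaries tmp0/tmp1 are defined outside this fragment. Each iteration
# keeps the larger (unsigned) of a_part and b_part: the sign bit of the
# double-width difference a_part - b_part is clear exactly when a_part >= b_part,
# bisz_ turns that into a 0/1 selector, and the mul_/add_ pair picks one of the
# two values without branching.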
dst_part = ctx.tmp(size)
ctx.emit( sub_ (a_part, b_part, tmp0))
ctx.emit( and_ (tmp0, imm(sign_bit(size * 2), size * 2), tmp0))
ctx.emit( bisz_ (tmp0, tmp1))
ctx.emit( mul_ (a_part, tmp1, tmp0))
ctx.emit( xor_ (tmp1, imm(1, size * 2), tmp1))
ctx.emit( mul_ (b_part, tmp1, tmp1))
ctx.emit( add_ (tmp0, tmp1, tmp0))
ctx.emit( str_ (tmp0, dst_part))
dst_parts.append(dst_part)
value = pack(ctx, dst_parts)
operand.set(ctx, i, dst_id, value)
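
# Add-with-carry fragment that recomputes only the carry flag (contrast x86_adc
# below, which calls _add_set_flags to update the full flag set); the enclosing
# function definition is not shown here.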
a = operand.get(ctx, i, 0)
b = operand.get(ctx, i, 1)
b = _sign_extend(ctx, a, b)
result = ctx.tmp(a.size * 2)
tmp0 = ctx.tmp(a.size * 2)
ctx.emit( add_ (a, b, result))
ctx.emit( add_ (result, r('cf', 8), result))
# only set carry flag
ctx.emit( and_ (result, imm(carry_bit(a.size), result.size), tmp0))
ctx.emit( bisnz_(tmp0, r('cf', 8)))
operand.set(ctx, i, 0, result)
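
# Fragment from what appears to be a packed signed comparison/selection helper:
# cond is raised either when a_abs_lt_b_abs holds and both operands have the same
# sign, or when a_part is negative and b_part is not, and mul_ by mask(size)
# expands the 0/1 condition into an all-zeros / all-ones destination element.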
# a < 0 and b >= 0
ctx.emit( bisnz_(a_sign, a_neg))
ctx.emit( bisz_ (b_sign, b_nonneg))
ctx.emit( and_ (a_neg, b_nonneg, a_neg_and_b_nonneg))
ctx.emit( and_ (a_abs_lt_b_abs, a_b_same_sign, cond))
ctx.emit( or_ (cond, a_neg_and_b_nonneg, cond))
ctx.emit( mul_ (cond, imm(mask(size), size), dst_part))
dst_parts.append(dst_part)
value = pack(ctx, dst_parts)
operand.set(ctx, i, dst_id, value)
def x86_adc(ctx, i):
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1)
    b = _sign_extend(ctx, a, b)

    result = ctx.tmp(a.size * 2)

    ctx.emit( add_ (a, b, result))
    ctx.emit( add_ (result, r('cf', 8), result))

    _add_set_flags(ctx, a, b, result)

    operand.set(ctx, i, 0, result)
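
# Packed compare-for-equality: equ_ yields 0 or 1 per element, and mul_ by
# mask(size) widens that into an all-zeros or all-ones destination element.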
a_parts = unpack(ctx, a, size)
b_parts = unpack(ctx, b, size)

dst_parts = []
for (a_part, b_part) in zip(a_parts, b_parts):
    tmp = ctx.tmp(size)
    ctx.emit( equ_ (a_part, b_part, tmp))
    ctx.emit( mul_ (tmp, imm(mask(size), size), tmp))
    dst_parts.append(tmp)

value = pack(ctx, dst_parts)
operand.set(ctx, i, dst_id, value)
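
# Tail of a rotate translation: tmp5 and tmp7 presumably hold the two shifted
# halves of the rotated value (computed before this fragment); the result is
# assembled, the carry and overflow flags are derived from it, and it is written
# back.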
ctx.emit( or_ (tmp5, tmp7, result))

# compute carry flag (last bit that was shifted across)
ctx.emit( and_ (result, imm(sign_bit(size), size), tmp8))
ctx.emit( bisnz_(tmp8, r('cf', 8)))

if isinstance(b, reil.ImmediateOperand) and b.value == 1:
    # overflow flag is msb of input ^ msb of output
    tmp9 = ctx.tmp(size)
    ctx.emit( and_ (a, imm(sign_bit(size), size), tmp9))
    ctx.emit( xor_ (tmp9, tmp8, tmp9))
    ctx.emit( bisnz_(tmp9, r('of', 8)))
else:
    # rotate count is not known to be 1; leave of undefined
    ctx.emit( undef_(r('of', 8)))

operand.set(ctx, i, 0, result)

# label targeted by an earlier jump (not shown in this fragment) that skips the
# rotate when the rotate count is zero
ctx.emit( 'zero_rotate')
ctx.emit( nop_())
def x86_pand(ctx, i):
    a_id, b_id, dst_id = vex_opnds(i)

    a = operand.get(ctx, i, a_id)
    b = operand.get(ctx, i, b_id)

    value = ctx.tmp(a.size)
    ctx.emit( and_ (a, b, value))

    # TODO: this will clear all the remaining bits of the destination register,
    # which is incorrect for the legacy sse version. When ymmX register support
    # is added, this will be broken.

    operand.set(ctx, i, dst_id, value)
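
# Register path of a bit-write helper (the memory-operand case is handled above,
# outside this fragment): clear the bit at position `offset` in the base operand,
# then shift the new bit value (`bit`, defined outside this fragment) into place
# and OR it back in.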
else:
    # simple case, it's a register
    a = operand.get(ctx, i, base_index)
    offset = operand.get(ctx, i, offset_index)

    bitmask = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)
    tmp1 = ctx.tmp(a.size)

    ctx.emit( lshl_ (imm(1, a.size), offset, bitmask))
    ctx.emit( xor_ (bitmask, imm(mask(a.size), a.size), bitmask))
    ctx.emit( and_ (a, bitmask, tmp0))
    ctx.emit( str_ (bit, tmp1))
    ctx.emit( lshl_ (tmp1, offset, tmp1))
    ctx.emit( or_ (tmp0, tmp1, tmp1))

    operand.set(ctx, i, base_index, tmp1)