# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _x86_psub(ctx, i, part_size):
    """Packed integer subtract: subtract each part_size-bit lane of b from
    the corresponding lane of a.

    NOTE(review): the computed `parts` list is never packed and written back
    to `dst_id` here — the sibling translators in this file end with
    pack()/operand.set(). This block appears truncated; confirm against the
    full source before relying on it.
    """
    a_id, b_id, dst_id = vex_opnds(i)

    a = operand.get(ctx, i, a_id)
    b = operand.get(ctx, i, b_id)

    # Operate on the common width of the two operands, split into lanes.
    size = min(a.size, b.size)
    part_count = size // part_size

    a_parts = unpack(ctx, a, part_size)[:part_count]

    # When both source operands are the same value, reuse the unpacked lanes
    # instead of unpacking a second time.
    if a == b:
        b_parts = a_parts
    else:
        b_parts = unpack(ctx, b, part_size)[:part_count]

    parts = []
    for j in range(0, part_count):
        tmp = ctx.tmp(part_size)
        ctx.emit( sub_ (a_parts[j], b_parts[j], tmp))
        parts.append(tmp)
def x86_pcmpeq(ctx, i, size):
    """Packed compare-for-equality: each size-bit lane of the destination
    becomes all-ones where the corresponding lanes of a and b are equal,
    and zero otherwise.

    Fix: the packed result `value` was computed but never written back to
    the destination operand; added the missing operand.set() call, matching
    the other definition of this translator later in the file.
    """
    a_id, b_id, dst_id = vex_opnds(i)

    a = operand.get(ctx, i, a_id)
    b = operand.get(ctx, i, b_id)

    a_parts = unpack(ctx, a, size)
    b_parts = unpack(ctx, b, size)

    dst_parts = []
    for (a_part, b_part) in zip(a_parts, b_parts):
        tmp = ctx.tmp(size)
        # equ_ yields 0/1; multiplying by the all-ones mask widens that
        # boolean into the full 0x00..0 / 0xff..f lane value x86 requires.
        ctx.emit( equ_ (a_part, b_part, tmp))
        ctx.emit( mul_ (tmp, imm(mask(size), size), tmp))
        dst_parts.append(tmp)

    value = pack(ctx, dst_parts)
    operand.set(ctx, i, dst_id, value)
def x86_pcmpeq(ctx, i, size):
    """Translate a packed equality compare.

    For every size-bit lane, the destination lane is set to an all-ones
    mask when the source lanes compare equal, and to zero otherwise.
    """
    src_a, src_b, dst = vex_opnds(i)

    lhs = operand.get(ctx, i, src_a)
    rhs = operand.get(ctx, i, src_b)

    lanes = []
    for lane_a, lane_b in zip(unpack(ctx, lhs, size), unpack(ctx, rhs, size)):
        flag = ctx.tmp(size)
        ctx.emit( equ_ (lane_a, lane_b, flag))
        # Scale the 0/1 compare result up to a full 0/all-ones lane.
        ctx.emit( mul_ (flag, imm(mask(size), size), flag))
        lanes.append(flag)

    operand.set(ctx, i, dst, pack(ctx, lanes))
# NOTE(review): orphaned fragment — the enclosing `def` header (presumably
# an x86 IMUL translator, given `_imul_set_flags`) and the opening
# `if len(i.operands) == 1:` branch are not present in this chunk, and
# `a_reg` / `b_reg` are defined outside the visible lines. Kept verbatim;
# do not restructure without the full source.

# Single-operand form (apparently): widening multiply, low half to one
# register, high half (result shifted right by b.size bits) to another.
result = ctx.tmp(b.size * 2)
result_value = ctx.tmp(b.size)
ctx.emit( mul_ (a, b, result))
ctx.emit( str_ (result, result_value))
operand.set_register(ctx, i, a_reg, result_value)
ctx.emit( lshr_ (result, imm(b.size, 8), result_value))
operand.set_register(ctx, i, b_reg, result_value)
_imul_set_flags(ctx, result)
elif len(i.operands) == 2:
# double operand form: dst *= src, full double-width result stored back.
a = operand.get(ctx, i, 0)
b = operand.get(ctx, i, 1)
result = ctx.tmp(a.size * 2)
ctx.emit( mul_ (a, b, result))
operand.set(ctx, i, 0, result)
_imul_set_flags(ctx, result)
else:
# triple operand form — truncated here; only the operand fetch and the
# start of the size-mismatch handling are visible.
a = operand.get(ctx, i, 1)
b = operand.get(ctx, i, 2)
if b.size < a.size:
prev_b = b
def x86_bsr(ctx, i):
    """Bit scan reverse: locate the highest set bit of the source operand.

    NOTE(review): truncated in this chunk — the scan loop that the
    'non-zero' branch jumps to is not visible here.
    """
    a = operand.get(ctx, i, 1)

    # These two immediates are immediately shadowed by the temporaries
    # below; presumably they document the intended starting values.
    bit = imm(sign_bit(a.size), a.size)
    index = imm(a.size, a.size)

    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)

    # Branch to the scan loop when the source is non-zero.
    ctx.emit( jcc_ (a, 'non-zero'))

    # if a is zero: set ZF, leave the destination undefined, and skip to
    # the end.
    ctx.emit( str_ (imm(1, 8), r('zf', 8)))
    operand.undefine(ctx, i, 0)
    ctx.emit( jcc_ (imm(1, 8), 'done'))

    # set up loop variables and clear zf
def x86_andn(ctx, i):
    """Translate ANDN: store (~first operand) & second operand back into
    the first operand, updating the logic flags."""
    lhs = operand.get(ctx, i, 0)
    rhs = operand.get(ctx, i, 1, lhs.size)

    width = min(lhs.size, rhs.size)
    value = ctx.tmp(width)

    # Complement lhs by xor-ing against an all-ones mask, then and with rhs.
    ctx.emit( xor_ (lhs, imm(mask(width), width), value))
    ctx.emit( and_ (value, rhs, value))

    _logic_set_flags(ctx, value)
    operand.set(ctx, i, 0, value)
def x86_blsr(ctx, i):
    """BLSR translator (reset lowest set bit of the source).

    NOTE(review): truncated in this chunk — the loop that the 'non-zero'
    branch jumps to is not visible, so the actual bit computation cannot
    be confirmed from here.
    """
    a = operand.get(ctx, i, 1)

    # These two immediates are immediately shadowed by the temporaries
    # below; presumably they document the intended starting values.
    bit = imm(sign_bit(a.size), a.size)
    index = imm(a.size, a.size)

    bit = ctx.tmp(a.size)
    index = ctx.tmp(a.size)
    result = ctx.tmp(a.size)
    tmp0 = ctx.tmp(a.size)

    # Branch to the main computation when the source is non-zero.
    ctx.emit( jcc_ (a, 'non-zero'))

    # if a is zero: set CF and skip to the end.
    ctx.emit( str_ (imm(1, 8), r('cf', 8)))
    ctx.emit( jcc_ (imm(1, 8), 'done'))

    # set up loop variables and clear zf
def x86_ror(ctx, i):
    """Rotate-right translator.

    NOTE(review): truncated — only operand fetch and temporary allocation
    are visible; the rotate computation continues past this chunk.
    """
    a = operand.get(ctx, i, 0)
    b = operand.get(ctx, i, 1)

    max_shift = a.size-1

    size = a.size
    tmp0 = ctx.tmp(size)
    tmp1 = ctx.tmp(8)
    tmp2 = ctx.tmp(size * 2)
    tmp3 = ctx.tmp(size * 2)
    tmp4 = ctx.tmp(size * 2)
    tmp5 = ctx.tmp(size)
    tmp6 = ctx.tmp(size * 2)
    tmp7 = ctx.tmp(size)
    tmp8 = ctx.tmp(size)
    result = ctx.tmp(size)

    # the rotate amount is truncated at word_size - 1