# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
part_count = size // (part_size * 2)
a_parts = unpack(ctx, a, part_size)[:part_count]
if a == b:
b_parts = a_parts
else:
b_parts = unpack(ctx, b, part_size)[:part_count]
parts = []
for j in range(0, part_count):
parts.append(a_parts[j])
parts.append(b_parts[j])
value = pack(ctx, parts)
operand.set(ctx, i, 0, value)
def _write_bit(ctx, i, base_index, offset_index, bit):
    # Store a single bit into the destination named by operand `base_index`,
    # at the bit position given by operand `offset_index`.  Only the
    # memory-destination path is visible in this chunk; `bit` and the tmps
    # `byte`/`bitmask` are consumed beyond the visible span.
    if operand.is_memory(ctx, i, base_index):
        # nasty case, indexing into in-memory bitstring; offset can be
        # > word_size
        base = operand.get_address(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        offset_sign = ctx.tmp(8)           # nonzero iff the bit offset is negative
        byte_offset = ctx.tmp(base.size)   # offset / 8, in bytes
        tmp0 = ctx.tmp(offset.size)
        byte = ctx.tmp(8)
        bitmask = ctx.tmp(8)
        # Capture the sign bit of the offset, then strip it so the
        # divide/modulo below operate on the magnitude only.
        ctx.emit( and_ (offset, imm(sign_bit(offset.size), offset.size), tmp0))
        ctx.emit( bisnz_(tmp0, offset_sign))
        ctx.emit( and_ (offset, imm(~sign_bit(offset.size), offset.size), offset))
        # Split the bit offset into a whole-byte offset and a bit-within-byte
        # remainder (offset is reduced in place to offset % 8).
        ctx.emit( div_ (offset, imm(8, offset.size), byte_offset))
        ctx.emit( mod_ (offset, imm(8, offset.size), offset))
        # Negative offsets take a separate path; both presumably rejoin at
        # 'base_calculated'.  Both labels are defined beyond the visible
        # span -- TODO confirm.
        ctx.emit( jcc_ (offset_sign, 'negative_offset'))
        # Non-negative offset: advance the base address by the byte offset.
        ctx.emit( add_ (base, byte_offset, base))
        ctx.emit( jcc_ (imm(1, 8), 'base_calculated'))
def x86_das(ctx, i):
    # Decimal adjust AL after subtraction (x86 `das`).  Only the entry and
    # the adjust-needed test are visible in this chunk; the 'adjust0' and
    # 'done0' jump targets are defined beyond the visible span.
    al = operand.get_register(ctx, i, 'al')
    result_al = ctx.tmp(8)
    tmp0 = ctx.tmp(16)   # 16-bit so the subtraction's borrow lands in the high byte
    tmp1 = ctx.tmp(8)
    # ((al & 0xf) > 9
    ctx.emit( and_ (al, imm(0xf, 8), result_al))
    # NOTE(review): as written this computes (al & 0xf) - 9, whose high
    # byte is nonzero when (al & 0xf) < 9 -- the opposite of the comment
    # above.  If sub_ is (minuend, subtrahend, dst), the operands may need
    # to be swapped to compute 9 - (al & 0xf); confirm against the REIL
    # sub_ semantics before changing.
    ctx.emit( sub_ (result_al, imm(9, 8), tmp0))
    ctx.emit( and_ (tmp0, imm(0xff00, 16), tmp0))
    ctx.emit( bisnz_(tmp0, tmp1))
    # || af == 1)
    ctx.emit( or_ (tmp1, r('af', 8), tmp1))
    ctx.emit( jcc_ (tmp1, 'adjust0'))
    # No adjustment needed: clear the auxiliary-carry flag and skip ahead.
    ctx.emit( str_ (imm(0, 8), r('af', 8)))
    ctx.emit( jcc_ (imm(1, 8), 'done0'))
def x86_cpuid(ctx, i):
    # Minimal translation of x86 `cpuid`.  Only leaf 0 (eax == 0) is
    # modelled: it reports 0 as the highest supported leaf and the vendor
    # identification string "GenuineIntel".  Any other leaf falls through
    # to 'done' and leaves the output registers untouched.
    eax = operand.get_register(ctx, i, 'eax')
    flag = ctx.tmp(8)
    ctx.emit( equ_ (eax, imm(0, 32), flag))
    ctx.emit( jcc_ (flag, 'cpuid_basic'))
    ctx.emit( jcc_ (imm(1, 8), 'done'))
    ctx.emit('cpuid_basic')
    # Leaf 0: eax = highest supported basic leaf (0 here).  The vendor
    # string is returned in EBX, EDX, ECX in that order, little-endian:
    # "Genu" / "ineI" / "ntel".
    operand.set_register(ctx, i, 'eax', imm(0, 32))
    operand.set_register(ctx, i, 'ebx', imm(0x756e6547, 32))  # "Genu"
    # Fix: "ineI" and "ntel" were previously swapped between ecx and edx,
    # which would have produced the vendor string "GenuntelineI".
    operand.set_register(ctx, i, 'edx', imm(0x49656e69, 32))  # "ineI"
    operand.set_register(ctx, i, 'ecx', imm(0x6c65746e, 32))  # "ntel"
    ctx.emit( jcc_ (imm(1, 8), 'done'))
    ctx.emit('done')
    ctx.emit( nop_())
def x86_mov(ctx, i):
    # Translate an x86 `mov`.  The `clear` flag passed through to
    # operand.set controls whether the destination's remaining bits are
    # cleared; register-to-register moves (and the sub-32-bit memory-source
    # special case) suppress it.
    dst_size = operand.get_size(ctx, i, 0)

    if len(i.operands) == 1:
        # Single-operand form: the source is the accumulator.
        src = ctx.accumulator
        should_clear = i.operands[0].type != capstone.x86.X86_OP_REG
    else:
        src = operand.get(ctx, i, 1, size=dst_size)
        dst_is_reg = i.operands[0].type == capstone.x86.X86_OP_REG
        src_is_reg = i.operands[1].type == capstone.x86.X86_OP_REG
        should_clear = not (dst_is_reg and src_is_reg)
        # Oh x86 how I hate you: a memory source narrower than 32 bits
        # also suppresses the clear.
        src_is_mem = i.operands[1].type == capstone.x86.X86_OP_MEM
        if src_is_mem and operand.get_size(ctx, i, 1) != 32:
            should_clear = False

    operand.set(ctx, i, 0, src, clear=should_clear)
def x86_pcmpgt(ctx, i, size):
a_id, b_id, dst_id = vex_opnds(i)
a = operand.get(ctx, i, a_id)
b = operand.get(ctx, i, b_id)
a_parts = unpack(ctx, a, size)
b_parts = unpack(ctx, b, size)
a_sign = ctx.tmp(size)
a_abs = ctx.tmp(size)
b_sign = ctx.tmp(size)
b_abs = ctx.tmp(size)
tmp0 = ctx.tmp(size * 2)
a_abs_lt_b_abs = ctx.tmp(8)
tmp1 = ctx.tmp(size)
a_b_same_sign = ctx.tmp(8)
a_neg = ctx.tmp(8)
b_nonneg = ctx.tmp(8)