def x86_movs(ctx, i, size):
    # This is to handle the mnemonic overload (SSE movsd) for 'move scalar
    # double-precision floating-point value' since capstone doesn't
    # distinguish. That instruction is just a mov into/from the SSE
    # registers.
    if not operand.is_memory(ctx, i, 0) or not operand.is_memory(ctx, i, 1):
        # so basically, if one of the operands is not a memory address, then we
        # know that this is the SSE version, which x86_mov can handle.
        return x86_mov(ctx, i)
    value = ctx.tmp(size)

    if i.mnemonic.startswith('rep'):
        rep_prologue(ctx, i)
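
    # copy one element: load value.size bits from the source pointer and
    # store them at the destination pointer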
    ctx.emit( ldm_ (ctx.source, value))
    ctx.emit( stm_ (value, ctx.destination))
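
    # the direction flag (DF) decides whether the string pointers are
    # advanced or walked backwards after each element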
    ctx.emit( jcc_ (r('df', 8), 'decrement'))

    ctx.emit('increment')
    ctx.emit( add_ (ctx.destination, imm(value.size // 8, ctx.word_size), ctx.destination))
    ctx.emit( add_ (ctx.source, imm(value.size // 8, ctx.word_size), ctx.source))
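
    # jcc_ on a constant 1 condition is an unconditional jump past the
    # 'decrement' block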
    ctx.emit( jcc_ (imm(1, 8), 'done'))
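

# The comments in x86_movs above rely on capstone reporting the same 'movsd'
# mnemonic for the SSE scalar move and for the string move, and on the string
# form being the only one with two memory operands. The standalone sketch
# below is not part of this translator; it only illustrates that overload.
# The sample instruction bytes and the operand-counting logic here are my own
# assumptions, not code from this project.
import capstone
from capstone import x86

md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
md.detail = True

samples = {
    'sse movsd':    b'\xf2\x0f\x10\x06',  # movsd xmm0, qword ptr [rsi]
    'string movsd': b'\xa5',              # movsd dword ptr [rdi], dword ptr [rsi]
}

for name, buf in samples.items():
    insn = next(md.disasm(buf, 0x1000))
    mem_ops = sum(1 for op in insn.operands if op.type == x86.X86_OP_MEM)
    print(name, '->', insn.mnemonic, insn.op_str, '| memory operands:', mem_ops)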

def _read_bit(ctx, i, base_index, offset_index):
    bit = ctx.tmp(8)

    if operand.is_memory(ctx, i, base_index):
        # nasty case, indexing into in-memory bitstring; offset can be
        # > word_size

        base = operand.get_address(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
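
        # base is the effective address where the bit string starts; offset
        # is the bit index taken from the other operand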
        offset_sign = ctx.tmp(8)
        byte_offset = ctx.tmp(base.size)
        tmp0 = ctx.tmp(offset.size)
        tmp1 = ctx.tmp(offset.size)
        tmp2 = ctx.tmp(offset.size)
        byte = ctx.tmp(8)
        bitmask = ctx.tmp(8)
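
        # split the (possibly negative) bit offset: tmp0 isolates its sign
        # bit, offset_sign records whether that bit was set, and tmp1 is the
        # offset with the sign bit cleared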
        ctx.emit( and_ (offset, imm(sign_bit(offset.size), offset.size), tmp0))
        ctx.emit( bisnz_(tmp0, offset_sign))
        ctx.emit( and_ (offset, imm(~sign_bit(offset.size), offset.size), tmp1))
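
# The 'nasty case' note above is about indexing into an in-memory bit string,
# as the bt/bts/btr/btc instructions do when given a memory base: the offset
# can exceed the word size and can be negative. The helper below is not part
# of the translator; it is a rough Python model of that addressing, written
# under my own assumptions about the intended semantics (byte displacement =
# offset >> 3, bit position = offset & 7, little-endian bit numbering within
# each byte).

def read_bit_model(memory, base, bit_offset):
    """Return the bit selected by bit_offset in the bit string at memory[base]."""
    # divmod floors, so a negative bit offset maps to a byte before `base`
    byte_offset, bit_in_byte = divmod(bit_offset, 8)
    byte = memory[base + byte_offset]
    return (byte >> bit_in_byte) & 1

# e.g. bit 13 of a bit string starting at index 2 lives in byte 2 + 1, bit 5
buf = bytearray(8)
buf[3] = 1 << 5
assert read_bit_model(buf, 2, 13) == 1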