def replace_pattern(self, graph: Graph, match: dict):
node = match['crop']
assert node.has_valid('axis')
node.axis = self.list_to_ndarray(node.axis)
in_shape = node.in_port(0).data.get_shape()
shape_rank = in_shape.size
axis_mask = int64_array([1 if i in node.axis else 0 for i in range(shape_rank)])
begin_mask = axis_mask.copy()
end_mask = axis_mask.copy()
if len(node.in_nodes()) == 2 and node.has_valid('offset'):
# Crop Type 1
begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node.axis, node.offset)}).create_node()
shape = Shape(graph, {'name': node.name + '/shape_of_crop'}).create_node()
end = Add(graph, {'name': node.name + '/end'}).create_node()
node.in_port(1).get_connection().get_source().connect(shape.in_port(0))
node.in_port(1).disconnect()
shape.out_port(0).connect(end.in_port(0))
begin.out_port(0).connect(end.in_port(1))
elif node.has_valid('dim') and node.has_valid('offset'):
# Crop Type 2
node.dim = self.list_to_ndarray(node.dim)
node.offset = self.list_to_ndarray(node.offset)
assert node.dim.size == node.offset.size == node.axis.size
begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node.axis, node.offset)}).create_node()
end_values = np.array([node.offset[i] + node.dim[i] for i in range(len(node.dim))])
end = Const(graph, {'value': self.mask_normalizer(shape_rank, node.axis, end_values)}).create_node()
elif node.has_valid('crop_begin') and node.has_valid('crop_end'):
# Crop Type 3
node.crop_begin = self.list_to_ndarray(node.crop_begin)
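# Illustrative aside (not part of the transform): for Crop Type 2 the begin/end values built
# above mean "slice offset : offset + dim along each listed axis". Plain-numpy sketch with
# made-up names (x, axes, offset, dim):
import numpy as np

x = np.arange(24).reshape(2, 3, 4)
axes, offset, dim = [2], [1], [2]  # crop axis 2 starting at 1 with length 2
begin = [offset[axes.index(a)] if a in axes else 0 for a in range(x.ndim)]
end = [begin[a] + dim[axes.index(a)] if a in axes else x.shape[a] for a in range(x.ndim)]
cropped = x[tuple(slice(b, e) for b, e in zip(begin, end))]  # same as x[:, :, 1:3]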
shift_port = node.in_port(2) if ports_count > 2 and not node.in_port(2).disconnected() else None
output_port = node.out_port(0)
has_biases = True
has_weights = True
# We don't need zero biases
if shift_port is None or (shift_port.data.get_value() is not None and all([x == 0 for x in shift_port.data.get_value()])):
has_biases = False
# We don't need weights with ones
if scale_port is None or (scale_port.data.get_value() is not None and all([x == 1 for x in scale_port.data.get_value()])):
has_weights = False
mul_op = Mul(graph, dict(name=node.name + "/Mul_"))
add_op = Add(graph, dict(name=node.name + "/Add_"))
# Expand dims for current layout
broadcast_dims_cnt = len(input_port.data.get_shape()) - 2 if graph.graph['layout'] == 'NCHW' else 0
# If we have constant weights/biases, we broadcast them according to the graph layout;
# otherwise we insert a Reshape with a broadcast dim attribute.
def broadcast_value(port):
value = np.array(port.data.get_value())
for idx in range(broadcast_dims_cnt):
value = np.expand_dims(value, axis=-1)
port.data.set_value(value)
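# Illustrative aside: for an NCHW graph, broadcast_dims_cnt trailing dims are appended so a
# per-channel constant lines up with the data tensor. Made-up numpy values:
import numpy as np

data = np.zeros((1, 3, 5, 5))      # N, C, H, W
scale = np.array([0.5, 1.0, 2.0])  # one value per channel C
scale = np.expand_dims(np.expand_dims(scale, axis=-1), axis=-1)  # shape (3, 1, 1)
scaled = data * scale              # broadcasts over H and W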
def broadcast_with_reshape(port):
input_shape = input_port.data.get_shape()
reshape_dims = np.zeros(len(input_shape), dtype=np.int64)
for i in range(0, node.axis):
def replace_op(self, graph: Graph, node: Node):
add_node = scalar_ops_replacer(graph, node, Add)
return [add_node.id]
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Node, Graph
from extensions.ops.elementwise import Add, Maximum, Mul
class EltwiseNReplacement(FrontReplacementOp):
"""
This replacer substitutes an elementwise operation with more than 2 inputs with a chain of simple
2-input elementwise operations. The replacer supports the operations supported by the Eltwise layer.
"""
op = 'EltwiseN'
enabled = True
op_to_class_map = {
'sum': Add,
'max': Maximum,
'mul': Mul,
}
def replace_op(self, graph: Graph, node: Node):
last_node = node
operation = node.operation
assert operation in EltwiseNReplacement.op_to_class_map
op_class = EltwiseNReplacement.op_to_class_map[operation]
left_connect = node.in_port(0).get_connection()
for ind in list(node.in_ports())[1:]:
attrs = {'name': node.name + '/' + operation + '_' + str(ind)}
attrs.update({'axis': node.axis} if node.has_valid('axis') else {})
# Create node
eltwise_op = op_class(graph, attrs).create_node()
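# Illustrative aside: the loop above chains 2-input ops so that, e.g.,
# EltwiseN('sum', a, b, c) becomes Add(Add(a, b), c). Plain-numpy equivalent:
from functools import reduce
import numpy as np

numpy_op = {'sum': np.add, 'max': np.maximum, 'mul': np.multiply}

def eltwise_n(operation, *inputs):
    # fold the inputs left to right with the matching binary op
    return reduce(numpy_op[operation], inputs)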
def replace_pattern(self, graph: Graph, match: dict):
node = match['op']
if (node.data_format != b'NHWC' or
len(node.in_nodes()) != 5 or
node.in_node(0).value is not None or # input
node.in_node(1).value is None or # scale
node.in_node(2).value is None or # offset
node.in_node(3).value is not None or # mean
node.in_node(4).value is not None or # variance
node.in_node(1).value.ndim != 1 or
node.in_node(2).value.ndim != 1):
return
scale_mul = Mul(graph, dict(name=node.name + '/scale_mul_'))
shift_add = Add(graph, dict(name=node.name + '/shift_add_'))
mean_add = Add(graph, dict(name=node.name + '/mean_add_'))
variance_mul = Mul(graph, dict(name=node.name + '/variance_mul_'))
neg_const = Const(graph, dict(value=np.array(-1), name=node.name + '/mean_negate_'))
mean_negate = Mul(graph, dict(name=node.name + '/mean_negate_'))
mean_arg = mean_add.create_node_with_data([
node.in_node(0),
mean_negate.create_node_with_data([node.in_node(3),
neg_const.create_node_with_data()
])])
shift_const = Const(graph, dict(value=node.eps, name=node.name + '/variance_denom_shift_const_'))
power_const = Const(graph, dict(value=-0.5, name=node.name + '/variance_denom_power_const_'))
variance_denom_shift = Add(graph, dict(name=node.name + '/variance_denom_shift_'))
variance_denom_power = Pow(graph, dict(name=node.name + '/variance_denom_power_'))
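# Descriptive note (assuming the usual FusedBatchNorm semantics): the nodes above assemble
#   y = scale * (x - mean) / sqrt(variance + eps) + offset
# mean_negate/mean_add compute (x - mean), variance_denom_shift computes (variance + eps),
# and variance_denom_power raises it to the power -0.5 (i.e. 1 / sqrt(...)).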
variance_arg = variance_mul.create_node_with_data([
return
output_low = output_low.value
output_high = output_high.value
operator = match['operator']
if np.all(np.isclose(output_low, 0)) and np.all(np.isclose(output_high, 1)):
weights = operator.in_node(1).value
reduction_indices = set(range(len(weights.shape))) - set([operator.output_feature_channel])
weights_reduced = np.add.reduce(weights, axis=tuple(reduction_indices))
weights_reduced = weights_reduced.reshape([len(weights_reduced), 1, 1])
add_term = Const(graph, {'value': weights_reduced}).create_node()
add = Add(graph, {}).create_node()
add.in_port(1).connect(add_term.out_port(0))
mul_term = Const(graph, {'value': np.array(0.5)}).create_node()
mul = Mul(graph, {}).create_node()
mul.in_port(1).connect(mul_term.out_port(0))
add.out_port(0).connect(mul.in_port(0))
operator.out_port(0).get_connection().set_source(mul.out_port(0))
add.in_port(0).connect(operator.out_port(0))
operator['pad_value'] = float(-1.0)
elif np.all(np.isclose(output_low, -1)) and np.all(np.isclose(output_high, +1)):
pass
else:
log.debug('ConvToBinaryConv: cannot apply transformation because input range is neither in [0, +1] nor '
'in [-1, +1].')
return
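# Illustrative aside: why the Add(sum of weights) + Mul(0.5) pair recovers a {0, 1}
# convolution from a {-1, +1} one. With activations b in {0, 1} and s = 2*b - 1 in {-1, +1}:
#   dot(w, b) == 0.5 * (dot(w, s) + sum(w))
# Tiny numpy check with made-up values:
import numpy as np

w = np.array([0.3, -1.2, 0.7])
b = np.array([1.0, 0.0, 1.0])  # binarized activations in {0, 1}
s = 2 * b - 1                  # the same activations in {-1, +1}
assert np.isclose(np.dot(w, b), 0.5 * (np.dot(w, s) + w.sum()))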
assert len(op.in_ports()) == 1
last_port = op.in_port(0).get_source()
# Create Mul & Add nodes
if has_weights:
mul_weights = Const(graph, dict(value=op.scale, shape=op.scale.shape)).create_node()
mul_op = Mul(graph, dict(name=op.id + '/mul_')).create_node()
op.in_port(0).get_connection().set_destination(mul_op.in_port(0))
mul_weights.out_port(0).connect(mul_op.in_port(1))
last_port = mul_op.out_port(0)
if has_bias:
add_bias = Const(graph, dict(value=op.bias, shape=op.bias.shape)).create_node()
add_op = Add(graph, dict(name=op.id + '/add_')).create_node()
last_port.get_connection().set_destination(add_op.in_port(0))
add_bias.out_port(0).connect(add_op.in_port(1))
last_port = add_op.out_port(0)
op.in_port(0).disconnect()
op.out_port(0).get_connection().set_source(last_port)
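# Descriptive note: the rewiring above turns the op into an explicit y = x * scale + bias
# chain; when has_weights/has_bias is False the corresponding Mul or Add is skipped, so
# multiply-by-one and add-zero nodes never reach the resulting graph.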
first_mul_name = node.name
mul = mul * const_port_value
add = add * const_port_value
elif node.op == 'Add':
if first_add_name is None:
first_add_name = node.name
add = add + const_port_value
# If mul is a scalar, broadcast it to the shape of the biases
if mul.shape != add.shape and len(mul.shape) == 1 and mul.shape[0] == 1:
mul = np.array([mul[0] for x in range(add.shape[0])])
assert (np.array_equal(get_tensor_in_port(fnodes[0]).data.get_shape(), fnodes[-1].out_port(0).data.get_shape()))
mul_op = Mul(graph, dict(name='{}/Fused_Mul_'.format(first_mul_name or '')))
add_op = Add(graph, dict(name='{}/Fused_Add_'.format(first_add_name or '')))
in_port = get_tensor_in_port(fnodes[0])
out_port = fnodes[-1].out_port(0)
"""
Four cases are considered below:
1. Mul and Add both have valid values (mul value != 1 and add value != 0)
2. Only Mul has valid values, so we add only a Mul node
3. Only Add has valid values, so we add only an Add node
4. Neither Mul nor Add has valid values, so we just merge the two data nodes
"""
if any([x != 0 for x in np.nditer(add)]) and any([x != 1 for x in np.nditer(mul)]):
# Const\ Const\
# ----->Mul------>Add-->
mul_const = Const(graph, dict(name="data_mul_", value=np.array(mul))).create_node()
add_const = Const(graph, dict(name="data_add_", value=np.array(add))).create_node()
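# Illustrative aside: the mul/add accumulation above relies on the identity
#   (x * m1 + a1) * m2 + a2 == x * (m1 * m2) + (a1 * m2 + a2)
# so a whole Mul/Add sequence collapses into the single data_mul_/data_add_ pair. Check:
import numpy as np

x, m1, a1, m2, a2 = np.array(3.0), 2.0, 1.0, 4.0, -0.5
assert np.isclose((x * m1 + a1) * m2 + a2, x * (m1 * m2) + (a1 * m2 + a2))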
gemm['input_channel_dim'] = 1 # MatMul weights in IO
gemm['output_channel_dim'] = 0
gemm['layout'] = 'NCHW'
gemm.in_port(1).bin = 'weights'
else:
B = gemm.in_node(1)
assert B.value is not None
if gemm.transpose_b:
B.value = B.value.transpose()
B.shape = np.array(B.value.shape, dtype=np.int64)
bias_node = Add(graph, {'name': 'MatMulBias_'}).create_node()
gemm.out_port(0).get_connection().set_source(bias_node.out_port(0))
gemm.in_port(2).get_connection().set_destination(bias_node.in_port(1))
gemm.out_port(0).connect(bias_node.in_port(0))
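# Descriptive note (assuming the standard Gemm semantics Y = alpha * A * B + beta * C):
# the third input C is split out into the explicit bias Add created above, and a non-unit
# alpha is turned into an explicit scalar Mul in the branch below.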
if graph.graph['cmd_params'].generate_experimental_IR_V10:
gemm.type = 'MatMul'
if gemm.has_valid('alpha'):
if not math.isclose(gemm.alpha, 1):
mul_node = Mul(graph, {'name': 'MatMulAlpha_'}).create_node()
const = Const(graph, {'value': np.array(gemm.alpha)}).create_node()
bias_node.in_port(0).get_connection().set_destination(mul_node.in_port(0))
bias_node.in_port(0).connect(mul_node.out_port(0))
mul_node.in_port(1).connect(const.out_port(0))
del gemm['alpha']
if gemm.has_valid('beta'):
def replace_pattern(graph: Graph, match: dict):
node = match['sub']
# Add new nodes
negate_const = Const(graph, dict(name=node.name + '/negate_const', value=np.array(-1))).create_node()
negate = Mul(graph, {'name': node.name + '/negate_'}).create_node()
add = Add(graph, {'name': node.name + '/add_'}).create_node()
# Connect nodes
node.in_port(1).get_connection().set_destination(negate.in_port(0))
negate_const.out_port(0).connect(negate.in_port(1))
node.in_port(0).get_connection().set_destination(add.in_port(1))
negate.out_port(0).connect(add.in_port(0))
node.out_port(0).get_connection().set_source(add.out_port(0))
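# Illustrative aside: the rewrite above uses the identity a - b == a + (-1) * b.
# Tiny numpy check with made-up values:
import numpy as np

a, b = np.array([3.0, 5.0]), np.array([1.0, 7.0])
assert np.allclose(a - b, a + (-1) * b)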