def broadcast_parameters(params, root_rank=0):
    """
    Broadcasts the parameters from root rank to all other processes.
    Typical usage is to broadcast the `Module.get_params()`.

    Arguments:
        params: dict of parameters to broadcast
        root_rank: The rank of the process from which parameters will be
                   broadcasted to all other processes.
    """
    global parameter_index

    if isinstance(params, dict):
        tensors = [p for _, p in sorted(params.items())]

        # Run tensor initialization
        for i in range(len(tensors)):
            byteps_declare_tensor("parameter_" + str(parameter_index))
            # Broadcast is implemented as push + pull in BytePS.
            # To broadcast: zero out all non-root tensors and disable the
            # push_pull average.
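            # (sketch of the arithmetic behind this trick: with every
            # non-root copy zeroed, the summed result of the push + pull is
            # exactly the root's tensor, so each worker ends up holding the
            # root's parameters)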
            if rank() != root_rank:
                tensors[i].__imul__(0)
            byteps_push_pull(tensors[i], version=0, priority=0,
                             name="parameter_" + str(parameter_index),
                             is_average=False)
            parameter_index += 1

        # Make sure tensors pushed to MXNet engine get processed such that all
        # workers are synced before starting training.
        for tensor in tensors:
            tensor.wait_to_read()
    elif isinstance(params, mx.gluon.parameter.ParameterDict):
        raise TypeError("For gluon users, you should not call this function. "
                        "DistributedTrainer will broadcast all parameters at "
                        "the first training step.")
    def _do_push_pull_param(self, index, delta_weight):
        if isinstance(index, (tuple, list)):
            for i in range(len(index)):
                byteps_declare_tensor("weight_" + str(index[i]))
                byteps_push_pull(delta_weight[i], version=0,
                                 priority=-index[i],
                                 name="weight_" + str(index[i]),
                                 is_average=False)
        else:
            byteps_declare_tensor("weight_" + str(index))
            byteps_push_pull(delta_weight, version=0, priority=-index,
                             name="weight_" + str(index), is_average=False)
    def _do_push_pull(self, index, grad):
        if isinstance(index, (tuple, list)):
            for i in range(len(index)):
                byteps_declare_tensor("gradient_" + str(index[i]))
                byteps_push_pull(grad[i], version=0, priority=-index[i],
                                 name="gradient_" + str(index[i]),
                                 is_average=True)
        else:
            byteps_declare_tensor("gradient_" + str(index))
            byteps_push_pull(grad, version=0, priority=-index,
                             name="gradient_" + str(index), is_average=True)
    def __init__(self, params, optimizer, optimizer_params=None, root_rank=0):
        param_list = []
        if isinstance(params, mx.gluon.ParameterDict):
            for key in sorted(list(params.keys())):
                param_list.append(params[key])

        super(DistributedTrainer, self).__init__(
            param_list, optimizer, optimizer_params=optimizer_params,
            kvstore=None)

        # _scale is used to check and set rescale_grad for the optimizer in
        # the Trainer.step() function. Normalizing it by the BytePS size,
        # which is equivalent to performing the average in push_pull, has
        # better performance.
        self._scale /= size()
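        # (worked example: with size() == 4 workers, rescale_grad shrinks by
        # 4x, so a plain sum of the four workers' gradients applies the same
        # update as a true average, with no extra division pass per tensor)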
        self.root_rank = root_rank
        for i, param in enumerate(self._params):
            byteps_declare_tensor("parameter_" + str(i))
            if param.grad_req != 'null':
                byteps_declare_tensor("gradient_" + str(i))