import tensorflow as tf
import numpy as np
import larq as lq
from larq import utils
from copy import deepcopy
@utils.register_keras_custom_object
class XavierLearningRateScaling(tf.keras.optimizers.Optimizer):
"""Optimizer wrapper for Xavier Learning Rate Scaling
Scale the weights learning rates respectively with the weights initialization
!!! note ""
This is a wrapper and does not implement any optimization algorithm.
!!! example
```python
optimizer = lq.optimizers.XavierLearningRateScaling(
tf.keras.optimizers.Adam(0.01), model
)
```
    # Arguments
    optimizer: A `tf.keras.optimizers.Optimizer` to wrap.
    model: A `tf.keras.Model`.
    """


"""Constraints can be applied to the weights of quantized layers via two
keyword arguments:

- `kernel_constraint` for the main weights matrix
- `bias_constraint` for the bias.

```python
import larq as lq

lq.layers.QuantDense(64, kernel_constraint="weight_clip")
lq.layers.QuantDense(64, kernel_constraint=lq.constraints.WeightClip(2.))
```
"""
import tensorflow as tf
from larq import utils
@utils.register_keras_custom_object
class WeightClip(tf.keras.constraints.Constraint):
    """Weight Clip constraint

    Constrains the weights incident to each hidden unit
    to be between `[-clip_value, clip_value]`.

    # Arguments
    clip_value: The value to clip incoming weights.
    """

    def __init__(self, clip_value=1):
        self.clip_value = clip_value

    def __call__(self, x):
        return tf.clip_by_value(x, -self.clip_value, self.clip_value)
@lq.utils.register_keras_custom_object
def xnor_weight_scale(x):
"""
Clips the weights between -1 and +1 and then calculates a scale factor per
weight filter. See https://arxiv.org/abs/1603.05279 for more details
"""
x = tf.clip_by_value(x, -1, 1)
alpha = tf.reduce_mean(tf.abs(x), axis=[0, 1, 2], keepdims=True)
return alpha * lq.quantizers.ste_sign(x)
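# For illustration only (assumed usage, not taken from the original source): a
# quantizer registered this way can be passed to a quantized layer as its
# kernel quantizer.
conv = lq.layers.QuantConv2D(
    32, (3, 3),
    input_quantizer="ste_sign",
    kernel_quantizer=xnor_weight_scale,
    kernel_constraint="weight_clip",
)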
@utils.register_keras_custom_object
def hard_tanh(x):
"""Hard tanh activation function.
```plot-activation
activations.hard_tanh
```
# Arguments
x: Input tensor.
# Returns
Hard tanh activation.
"""
return tf.clip_by_value(x, -1, 1)
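# For illustration only (assumed usage): the activation can be passed to a
# Keras layer as a callable.
dense = tf.keras.layers.Dense(64, activation=hard_tanh)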
def get_training_metrics():
    """Retrieves the set of training metrics in the current scope.

    `get_training_metrics` can be used to directly access and modify the
    registered metrics.

    !!! example
        ```python
        get_training_metrics().clear()
        get_training_metrics().add("flip_ratio")
        ```

    # Returns
    A set of training metrics in the current scope.
    """
    return _GLOBAL_TRAINING_METRICS
@utils.register_alias("flip_ratio")
@utils.register_keras_custom_object
class FlipRatio(tf.keras.metrics.Metric):
"""Computes the mean ration of changed values in a given tensor.
!!! example
```python
m = metrics.FlipRatio(values_shape=(2,))
m.update_state((1, 1)) # result: 0
m.update_state((2, 2)) # result: 1
m.update_state((1, 2)) # result: 0.75
print('Final result: ', m.result().numpy()) # Final result: 0.75
```
# Arguments
values_shape: Shape of the tensor for which to track changes.
values_dtype: Data type of the tensor for which to track changes.
name: Name of the metric.
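# A minimal sketch of what FlipRatio measures (an illustration of the example
# above, not the metric's implementation): the fraction of elements that changed
# since the previous update, averaged over all updates after the first one.
def _flip_ratio_trace(updates):
    previous, ratios = None, []
    for values in updates:
        if previous is not None:
            changed = sum(v != p for v, p in zip(values, previous))
            ratios.append(changed / len(values))
        previous = values
    return sum(ratios) / len(ratios) if ratios else 0

print(_flip_ratio_trace([(1, 1), (2, 2), (1, 2)]))  # 0.75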
@utils.register_keras_custom_object
@utils.set_precision(2)
def dorefa_quantizer(x, k_bit=2):
r"""k_bit quantizer as in the DoReFa paper.
\\[
q(x) = \begin{cases}
0 & x < \frac{1}{2n} \\\
\frac{i}{n} & \frac{2i-1}{2n} < |x| < \frac{2i+1}{2n} \text{ for } i \in \\{1,n-1\\}\\\
1 & \frac{2n-1}{2n} < x
\end{cases}
\\]
where \\(n = 2^{\text{k_bit}} - 1\\). The number of bits, k_bit, needs to be passed as an argument.
The gradient is estimated using the Straight-Through Estimator
(essentially the binarization is replaced by a clipped identity on the
backward pass).
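    # A minimal sketch of the behaviour described above (an assumption for
    # illustration, not necessarily the library's exact implementation): clip to
    # [0, 1], round to n = 2**k_bit - 1 levels, and pass the gradient of the
    # rounding step straight through, so the backward pass is a clipped identity.
    n = 2 ** k_bit - 1
    x = tf.clip_by_value(x, 0.0, 1.0)

    @tf.custom_gradient
    def _round_with_identity_grad(v):
        return tf.round(v * n) / n, lambda dy: dy

    return _round_with_identity_grad(x)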
import tensorflow as tf
import larq as lq
from copy import deepcopy
import logging
logger = logging.getLogger("rethink_logger")
@lq.utils.register_keras_custom_object
class Bop(tf.keras.optimizers.Optimizer):
"""Binary optimizer (Bop).
Bop is a latent-free optimizer for Binarized Neural Networks (BNNs) and
Binary Weight Networks (BWN).
Bop maintains an exponential moving average of the gradients controlled by
`gamma`. If this average exceeds the `threshold`, a weight is flipped.
Additionally, Bop accepts a regular optimizer that is applied to the
non-binary weights in the network.
The hyperparameter `gamma` is somewhat analogues to the learning rate in
SGD methods: a high `gamma` results in rapid convergence but also makes
training more noisy.
Note that the default `threshold` is not optimal for all situations.
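# A minimal sketch of the flip rule described above (an illustration of the
# idea, not the optimizer's actual update code): keep an exponential moving
# average of the gradient for each binary weight and flip the weight when that
# average is large enough and pushes against the weight's current sign.
def bop_flip_step(w, grad, m, gamma, threshold):
    m = (1 - gamma) * m + gamma * grad  # exponential moving average of gradients
    should_flip = tf.logical_and(tf.abs(m) >= threshold, tf.sign(m) == tf.sign(w))
    return tf.where(should_flip, -w, w), m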
@lq.utils.register_keras_custom_object
def clip_by_value_activation(x):
    return tf.clip_by_value(x, 0, 1)
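# For illustration only (assumed usage): the clipped-identity activation can be
# passed directly to a Keras layer.
dense = tf.keras.layers.Dense(32, activation=clip_by_value_activation)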