# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def apply_gain_stereo(seg, left_gain=0.0, right_gain=0.0):
    """
    Apply an independent gain to each channel of an audio segment and
    return a new 2-channel segment.

    left_gain - amount of gain to apply to the left channel (in dB)
    right_gain - amount of gain to apply to the right channel (in dB)

    note: mono audio segments will be converted to stereo

    Raises ValueError for segments with more than 2 channels (the
    original code left `left`/`right` unbound in that case and died
    later with a confusing NameError).
    """
    if seg.channels == 1:
        # mono: feed the same data to both sides
        left = right = seg
    elif seg.channels == 2:
        left, right = seg.split_to_mono()
    else:
        raise ValueError(
            "apply_gain_stereo() only supports mono or stereo segments "
            "(got %d channels)" % seg.channels)

    l_mult_factor = db_to_float(left_gain)
    r_mult_factor = db_to_float(right_gain)

    # scale each channel, then widen it to stereo with the other side silent
    left_data = audioop.mul(left._data, left.sample_width, l_mult_factor)
    left_data = audioop.tostereo(left_data, left.sample_width, 1, 0)

    right_data = audioop.mul(right._data, right.sample_width, r_mult_factor)
    right_data = audioop.tostereo(right_data, right.sample_width, 0, 1)

    # mix the two half-silent stereo streams into the final stereo output
    output = audioop.add(left_data, right_data, seg.sample_width)

    return seg._spawn(data=output,
                      overrides={'channels': 2,
                                 'frame_width': 2 * seg.sample_width})
if duration:
if start is not None:
end = start + duration
elif end is not None:
start = end - duration
else:
duration = end - start
from_power = db_to_float(from_gain)
output = []
# original data - up until the crossfade portion, as is
before_fade = self[:start]._data
if from_gain != 0:
before_fade = audioop.mul(before_fade,
self.sample_width,
from_power)
output.append(before_fade)
gain_delta = db_to_float(to_gain) - from_power
# fades longer than 100ms can use coarse fading (one gain step per ms),
# shorter fades will have audible clicks so they use precise fading
# (one gain step per sample)
if duration > 100:
scale_step = gain_delta / duration
for i in range(duration):
volume_change = from_power + (scale_step * i)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
self.sample_width,
volume_change)
output.append(chunk)
else:
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
self.sample_width,
from_power)
output.append(before_fade)
gain_delta = db_to_float(to_gain) - from_power
# fades longer than 100ms can use coarse fading (one gain step per ms),
# shorter fades will have audible clicks so they use precise fading
# (one gain step per sample)
if duration > 100:
scale_step = gain_delta / duration
for i in range(duration):
volume_change = from_power + (scale_step * i)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
self.sample_width,
volume_change)
output.append(chunk)
else:
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
chunk = self[start + i]
chunk = audioop.mul(chunk._data,
self.sample_width,
volume_change)
output.append(chunk)
else:
start_frame = self.frame_count(ms=start)
end_frame = self.frame_count(ms=end)
fade_frames = end_frame - start_frame
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)