# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a fade/crossfade routine — the enclosing `def`
# is outside this view, so start_frame, end_frame, from_power, gain_delta,
# to_gain, end and output are presumed to be set up earlier; confirm
# against the full function. Indentation was lost in extraction.
fade_frames = end_frame - start_frame
# per-frame linear increment of the gain multiplier across the fade
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
# gain multiplier for this frame: linear ramp starting at from_power
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
# scale this frame's raw bytes by the current gain multiplier
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
def normalize(seg, headroom=0.1):
    """
    Boost the segment so its peak sits ``headroom`` dB below full scale.

    headroom is how close to the maximum volume to boost the signal up to
    (specified in dB)
    """
    current_peak = seg.max
    # an all-zero (silent) segment has nothing to scale — return unchanged
    if not current_peak:
        return seg
    # absolute target amplitude: `headroom` dB under the maximum possible
    desired_peak = db_to_float(-headroom) * seg.max_possible_amplitude
    gain_needed = ratio_to_db(desired_peak / current_peak)
    return seg.apply_gain(gain_needed)
# NOTE(review): duplicated fragment of the fade/crossfade tail above — the
# enclosing `def` is outside this view; start_frame, end_frame, from_power,
# gain_delta, to_gain, end and output come from earlier in the function.
fade_frames = end_frame - start_frame
# per-frame linear increment of the gain multiplier across the fade
scale_step = gain_delta / fade_frames
for i in range(int(fade_frames)):
# gain multiplier for this frame: linear ramp starting at from_power
volume_change = from_power + (scale_step * i)
sample = self.get_frame(int(start_frame + i))
# scale this frame's raw bytes by the current gain multiplier
sample = audioop.mul(sample, self.sample_width, volume_change)
output.append(sample)
# original data after the crossfade portion, at the new volume
after_fade = self[end:]._data
if to_gain != 0:
after_fade = audioop.mul(after_fade,
self.sample_width,
db_to_float(to_gain))
output.append(after_fade)
return self._spawn(data=output)
def apply_gain(self, volume_change):
    """Return a new segment with `volume_change` dB of gain applied."""
    multiplier = db_to_float(float(volume_change))
    scaled_data = audioop.mul(self._data, self.sample_width, multiplier)
    return self._spawn(data=scaled_data)
# Scan `audio_segment` in min_silence_len-sized windows and collect the
# start offsets of windows at/below silence_thresh.
# NOTE(review): this copy is truncated — the body of the for-loop below
# continues outside this view; indentation was lost in extraction.
def detect_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
seg_len = len(audio_segment)
# you can't have a silent portion of a sound that is longer than the sound
if seg_len < min_silence_len:
return []
# convert silence threshold to a float value (so we can compare it to rms)
silence_thresh = db_to_float(silence_thresh) * audio_segment.max_possible_amplitude
# find silence and add start and end indicies to the to_cut list
silence_starts = []
# check successive (1 sec by default) chunk of sound for silence
# try a chunk at every "seek step" (or every chunk for a seek step == 1)
last_slice_start = seg_len - min_silence_len
slice_starts = range(0, last_slice_start + 1, seek_step)
# guarantee last_slice_start is included in the range
# to make sure the last portion of the audio is seached
if last_slice_start % seek_step:
slice_starts = itertools.chain(slice_starts, [last_slice_start])
for i in slice_starts:
audio_slice = audio_segment[i:i + min_silence_len]
# NOTE(review): fragment of an overlay routine — the `def` and the setup of
# seg1/seg2/times/output/sample_width/gain_during_overlay are outside this
# view; confirm against the full function. Indentation was lost.
pos = 0
seg1_len = len(seg1)
seg2_len = len(seg2)
while times:
remaining = max(0, seg1_len - pos)
if seg2_len >= remaining:
# overlay would run past the end of seg1: truncate it to fit
seg2 = seg2[:remaining]
seg2_len = remaining
# we've hit the end, we're done looping (if we were) and this
# is our last go-around
times = 1
if gain_during_overlay:
# adjust the underlying audio's gain while the overlay plays
seg1_overlaid = seg1[pos:pos + seg2_len]
seg1_adjusted_gain = audioop.mul(seg1_overlaid, self.sample_width,
db_to_float(float(gain_during_overlay)))
output.write(audioop.add(seg1_adjusted_gain, seg2, sample_width))
else:
output.write(audioop.add(seg1[pos:pos + seg2_len], seg2,
sample_width))
pos += seg2_len
# dec times to break our while loop (eventually)
times -= 1
# remainder of seg1 after the final overlay repetition, unchanged
output.write(seg1[pos:])
# NOTE(review): `spawn` is undefined here — looks like it should be
# `self._spawn`; verify against the original full function.
return spawn(data=output)
# Scan `audio_segment` in min_silence_len-sized windows and collect the
# start offsets of windows at/below silence_thresh.
# NOTE(review): duplicated copy, truncated — the body of the for-loop below
# continues outside this view; indentation was lost in extraction.
def detect_silence(audio_segment, min_silence_len=1000, silence_thresh=-16, seek_step=1):
seg_len = len(audio_segment)
# you can't have a silent portion of a sound that is longer than the sound
if seg_len < min_silence_len:
return []
# convert silence threshold to a float value (so we can compare it to rms)
silence_thresh = db_to_float(silence_thresh) * audio_segment.max_possible_amplitude
# find silence and add start and end indicies to the to_cut list
silence_starts = []
# check successive (1 sec by default) chunk of sound for silence
# try a chunk at every "seek step" (or every chunk for a seek step == 1)
last_slice_start = seg_len - min_silence_len
slice_starts = range(0, last_slice_start + 1, seek_step)
# guarantee last_slice_start is included in the range
# to make sure the last portion of the audio is seached
if last_slice_start % seek_step:
slice_starts = itertools.chain(slice_starts, [last_slice_start])
for i in slice_starts:
audio_slice = audio_segment[i:i + min_silence_len]
# NOTE(review): head fragment of a fade routine — the enclosing `def` and
# its parameters (start, end, duration, from_gain, to_gain) begin outside
# this view; confirm against the full function. Indentation was lost.
# a negative start is presumably an offset from the end of the segment
start += len(self)
if end is not None and end < 0:
end += len(self)
if duration is not None and duration < 0:
raise InvalidDuration("duration must be a positive integer")
# derive whichever of (start, end, duration) was not supplied
if duration:
if start is not None:
end = start + duration
elif end is not None:
start = end - duration
else:
duration = end - start
from_power = db_to_float(from_gain)
output = []
# original data - up until the crossfade portion, as is
before_fade = self[:start]._data
if from_gain != 0:
before_fade = audioop.mul(before_fade,
self.sample_width,
from_power)
output.append(before_fade)
# total change in linear gain that the fade must span
gain_delta = db_to_float(to_gain) - from_power
# fades longer than 100ms can use coarse fading (one gain step per ms),
# shorter fades will have audible clicks so they use precise fading
#(one gain step per sample)
def apply_gain_stereo(seg, left_gain=0.0, right_gain=0.0):
    """
    Apply independent gain to each stereo channel of ``seg``.

    left_gain - amount of gain to apply to the left channel (in dB)
    right_gain - amount of gain to apply to the right channel (in dB)

    note: mono audio segments will be converted to stereo

    Raises:
        ValueError: if ``seg`` has more than two channels.
    """
    if seg.channels == 1:
        left = right = seg
    elif seg.channels == 2:
        left, right = seg.split_to_mono()
    else:
        # fix: previously this fell through with `left`/`right` unbound and
        # crashed with an UnboundLocalError for multi-channel input
        raise ValueError(
            "apply_gain_stereo only supports mono or stereo segments")

    l_mult_factor = db_to_float(left_gain)
    r_mult_factor = db_to_float(right_gain)

    # scale each channel, then expand it to stereo with the other side muted
    left_data = audioop.mul(left._data, left.sample_width, l_mult_factor)
    left_data = audioop.tostereo(left_data, left.sample_width, 1, 0)

    right_data = audioop.mul(right._data, right.sample_width, r_mult_factor)
    right_data = audioop.tostereo(right_data, right.sample_width, 0, 1)

    # sum the two one-sided stereo streams into the final stereo signal
    output = audioop.add(left_data, right_data, seg.sample_width)

    return seg._spawn(data=output,
                      overrides={'channels': 2,
                                 'frame_width': 2 * seg.sample_width})
# NOTE(review): this method is truncated — the `metadata` dict below is cut
# off at the end of this view; indentation was lost in extraction.
def to_audio_segment(self, duration=1000.0, volume=0.0):
"""
Duration in milliseconds
(default: 1 second)
Volume in DB relative to maximum amplitude
(default 0.0 dBFS, which is the maximum value)
"""
minval, maxval = get_min_max_value(self.bit_depth)
sample_width = get_frame_width(self.bit_depth)
array_type = get_array_type(self.bit_depth)
# linear amplitude multiplier corresponding to the requested dB volume
gain = db_to_float(volume)
sample_count = int(self.sample_rate * (duration / 1000.0))
# lazily scale each generated sample (presumably -1.0..1.0 floats from
# self.generate() — confirm) up to integer amplitude at the chosen gain
sample_data = (int(val * maxval * gain) for val in self.generate())
# take only as many samples as the requested duration needs
sample_data = itertools.islice(sample_data, 0, sample_count)
data = array.array(array_type, sample_data)
try:
data = data.tobytes()
except:
# NOTE(review): bare except kept as-is; tostring() looks like the
# Python 2 fallback for tobytes() — narrow to AttributeError if the
# full file confirms that intent
data = data.tostring()
return AudioSegment(data=data, metadata={
"channels": 1,
"sample_width": sample_width,
"frame_rate": self.sample_rate,