import logging
import os
import typing
import numpy as np
import matplotlib.pyplot as plt
import colour
import PIL
from .audio import Sndfile, AudioProcessor, samples_to_seconds
from .image import WaveformImage, SpectrogramImage
from .plotting import make_fitted_histogram, make_spectrogram_heatmap
from .color import (
COLOR_SCHEMES,
DEFAULT_COLOR_SCHEME_KEY,
interpolate_colors_rgb,
snap_to_colors,
InterpolateColorsArgType,
InterpolateColorsRetType,
load_from_file)
__all__ = [
"map_range_factory",
"AudioVisualizer"
]
module_logger = logging.getLogger(__name__)
def map_range_factory(
    rnge0: typing.List[float],
    rnge1: typing.Optional[typing.List[float]] = None
) -> typing.Callable[[float], float]:
    """
    Build a function that linearly maps values from one range onto another.

    Args:
        rnge0: Source range, as ``[low, high]``
        rnge1: Target range, as ``[low, high]``; defaults to ``[0, 1.0]``

    Return:
        callable: Takes a value from ``rnge0`` and returns the corresponding
            value in ``rnge1``; input outside ``rnge0`` is clamped to the
            target range's endpoints.
    """
    if rnge1 is None:
        rnge1 = [0, 1.0]
    src_lo, src_hi = rnge0[0], rnge0[1]
    dst_lo, dst_hi = rnge1[0], rnge1[1]
    # Precompute the scale factor once; the closure only does the affine map.
    scale = (dst_hi - dst_lo) / (src_hi - src_lo)

    def map_range(val):
        # Clamp out-of-range inputs to the target endpoints.
        if val > src_hi:
            return dst_hi
        if val < src_lo:
            return dst_lo
        return scale * (val - src_lo) + dst_lo

    return map_range
class AudioVisualizer:
    """Build visualizations (waveforms/spectrograms) for a single audio file.

    Audio metadata (sample rate, frame count) is read once at construction
    time; derived geometry (samples per pixel, FFT size) is computed from the
    requested image dimensions.
    """

    def __init__(self,
                 input_filename: str, *,
                 image_width: int,
                 image_height: int,
                 fft_size: typing.Optional[int],
                 peak_width: int):
        """
        Args:
            input_filename: Path to the audio file to visualize
            image_width: Output image width, in pixels
            image_height: Output image height, in pixels
            fft_size: FFT window size; if ``None`` or ``-1`` it is derived
                from the number of samples per output pixel (rounded down to
                an even number)
            peak_width: Width, in pixels, of each rendered peak
        """
        self._input_filename = input_filename
        self._image_width = image_width
        # NOTE(review): public attribute, unlike the other read-only
        # properties below -- kept as-is for backward compatibility.
        self.image_height = image_height
        self._peak_width = peak_width
        # Open only to read metadata; processing reopens the file later.
        audio_file = Sndfile(input_filename, 'r')
        self._samplerate = audio_file.samplerate
        self._nframes = audio_file.nframes
        audio_file.close()
        self._adjusted_width = int(self._image_width / float(self._peak_width))
        self._samples_per_pixel = self._nframes / float(self._adjusted_width)
        if fft_size is None or fft_size == -1:
            fft_size = int(self._samples_per_pixel)
            # FFT size must be even.
            if fft_size % 2 != 0:
                fft_size -= 1
        self._fft_size = fft_size
        # Lazily-populated caches for spectral analysis results.
        self._db_spectra = None
        self._spectra = None
        self._spectral_stats = None
        self._peaks = None
        module_logger.debug(f"AudioVisualizer.__init__: fft_size={fft_size}")
        module_logger.debug(f"AudioVisualizer.__init__: image_width={self.image_width}, image_height={self.image_height}")

    @property
    def input_filename(self):
        # Path of the audio file passed to the constructor.
        return self._input_filename

    @property
    def image_width(self):
        return self._image_width

    @property
    def peak_width(self):
        return self._peak_width

    @property
    def samplerate(self):
        return self._samplerate

    @property
    def nframes(self):
        return self._nframes

    @property
    def adjusted_width(self):
        # Image width expressed in peaks rather than pixels.
        return self._adjusted_width

    @property
    def samples_per_pixel(self):
        return self._samples_per_pixel

    @property
    def fft_size(self):
        return self._fft_size

    @property
    def spectra(self):
        return self._spectra

    @property
    def db_spectra(self):
        return self._db_spectra

    @property
    def spectral_stats(self):
        return self._spectral_stats

    @property
    def peaks(self):
        return self._peaks

    def smooth(self, x, window_len=11, window='hanning'):
        """Smooth the data using a window of the requested size.

        This method is based on the convolution of a scaled window with the
        signal. The signal is prepared by introducing reflected copies of the
        signal (with the window size) at both ends so that transient parts
        are minimized at the beginning and end of the output signal.

        Args:
            x: the input signal (1-D numpy array)
            window_len: the dimension of the smoothing window; should be an
                odd integer
            window: the type of window from 'flat', 'hanning', 'hamming',
                'bartlett', 'blackman'; a flat window produces a moving
                average smoothing.

        Returns:
            the smoothed signal (same length as the input for odd
            ``window_len``); the input unchanged when ``window_len < 3``

        Raises:
            ValueError: if ``x`` is not 1-D, is shorter than ``window_len``,
                or ``window`` names an unknown window type

        Example:
            t = np.linspace(-2, 2, 41)
            x = np.sin(t) + np.random.randn(len(t)) * 0.1
            y = visualizer.smooth(x)

        See also:
            numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
            numpy.convolve, scipy.signal.lfilter

        TODO: the window parameter could be the window itself (an array)
            instead of a string
        """
        if x.ndim != 1:
            raise ValueError("smooth only accepts 1 dimension arrays.")
        if x.size < window_len:
            raise ValueError("Input vector needs to be bigger than window size.")
        if window_len < 3:
            return x
        if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
            # BUG FIX: error message previously read "Window is on of".
            raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
        module_logger.debug(f"AudioVisualizer.smooth: window={window}")
        # Pad the signal with reflected copies of itself at both ends to
        # reduce boundary transients.
        s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
        if window == 'flat':  # moving average
            w = np.ones(window_len, 'd')
        else:
            window_fn = getattr(np, window)
            w = window_fn(window_len)
        y = np.convolve(w / w.sum(), s, mode='valid')
        # Trim the padding so the output length matches the input.
        window_len_2 = int(window_len / 2)
        module_logger.debug(f"AudioVisualizer.smooth: y.shape={y.shape}, window_len_2={window_len_2}")
        return y[window_len_2:-window_len_2]

    def get_palette(
        self,
        color_scheme: str,
        color_scheme_key: str = "wave_colors",
        color_scheme_file_path: typing.Optional[str] = None,
        interpolate_fn: typing.Callable[[InterpolateColorsArgType], InterpolateColorsRetType] = None,
        **interpolate_fn_kwargs
    ) -> typing.Tuple[typing.Tuple[int], InterpolateColorsRetType]:
        """
        Get background color and color palette.

        Args:
            color_scheme (str): Name of the color scheme
            color_scheme_key (str): Name of the key in COLOR_SCHEMES[color_scheme].
                Can either be "spec_colors" or "wave_colors"
            color_scheme_file_path (str): Path to JSON or TOML file containing
                externally defined color scheme
            interpolate_fn (callable): Function to use to interpolate between
                values; defaults to ``interpolate_colors_rgb``
            **interpolate_fn_kwargs: Forwarded to ``interpolate_fn``

        Returns:
            tuple: background color as RGB tuple, and color palette as list
                of RGB tuples

        Raises:
            KeyError: if ``color_scheme`` is not present in the file at
                ``color_scheme_file_path``
        """
        if interpolate_fn is None:
            interpolate_fn = interpolate_colors_rgb
        module_logger.debug(f"AudioVisualizer.get_palette: interpolate_fn={interpolate_fn}")
        if color_scheme_file_path is None:
            colors = COLOR_SCHEMES[color_scheme][color_scheme_key]
            # First entry is the background; the rest are palette anchors.
            background_color = colors[0]
            palette = interpolate_fn(colors[1:], **interpolate_fn_kwargs)
        else:
            color_palettes = load_from_file(color_scheme_file_path)
            if color_scheme not in color_palettes:
                raise KeyError(f"{color_scheme} not found in {color_scheme_file_path}")
            background_color = color_palettes[color_scheme]["background"]
            palette = color_palettes[color_scheme]["palette"]

            def to256(val):
                # File-based schemes store channels as 0..1 floats; scale to
                # integer channel values (note: multiplies by 256, so 1.0
                # maps to 256 -- preserved from the original implementation).
                return tuple([int(v * 256.0) for v in val])

            background_color = to256(background_color)
            palette = interpolate_fn(palette, **interpolate_fn_kwargs)
        # BUG FIX: previously ended with a bare ``return``, yielding None
        # despite the documented contract; return the computed values.
        return background_color, palette
没有合适的资源?快使用搜索试试~ 我知道了~
温馨提示
共38个文件
py:35个
toml:1个
pkg-info:1个
资源分类:Python库 所属语言:Python 资源全名:blue-dot-sessions_gemscapes-1.5.1.tar.gz 资源来源:官方 安装方法:https://lanzao.blog.csdn.net/article/details/101784059
资源推荐
资源详情
资源评论
收起资源包目录
blue-dot-sessions_gemscapes-1.5.1.tar.gz (38个子文件)
blue-dot-sessions_gemscapes-1.5.1
PKG-INFO 793B
pyproject.toml 621B
LICENSE 1KB
setup.py 1KB
gemscapes
pipeline
wav2png.py 2KB
primitive.py 2KB
post_primitive.py 9KB
__init__.py 3KB
convert.py 2KB
post_wav2png.py 3KB
image
spectrogram_image.py 2KB
__init__.py 149B
waveform_image.py 4KB
plotting
__init__.py 3KB
metadata
__main__.py 781B
__init__.py 1KB
audio_visualizer.py 20KB
__main__.py 8KB
color
interpolate_colors.py 6KB
visualize_color_scheme.py 4KB
util.py 550B
__init__.py 468B
color_schemes.py 6KB
__init__.py 0B
conversion
__init__.py 7KB
audio
loudnorm.py 5KB
get_max_level.py 713B
pysndfile_wrapper.py 670B
get_sound_type.py 255B
audio_processor.py 8KB
__init__.py 328B
samples_to_seconds.py 207B
sndfile.py 55B
svg_tools
util.py 4KB
__main__.py 5KB
__init__.py 13KB
sandbox.py 6KB
exceptions.py 177B
共 38 条
- 1
资源评论
挣扎的蓝藻
- 粉丝: 13w+
- 资源: 15万+
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功