FBank、MFCC、PLP实现

FBank、MFCC、PLP实现_第1张图片

from scipy.fftpack import dct
from scipy.io import wavfile
import matplotlib.pyplot as plt
import sys
import librosa
import wave
import contextlib
import numpy as np

import matplotlib as mpl
mpl.use('TkAgg')

def plot_spectrogram(spec, name):
    """Render a 2-D feature matrix as a heatmap and save it to '<name>.jpg'.

    spec  -- 2-D array, plotted with frames along the x-axis
    name  -- used both as the plot title and as the output file stem
    """
    figure = plt.figure(figsize=(20, 5))
    mesh = plt.pcolor(spec)
    figure.colorbar(mappable=mesh)
    plt.xlabel('Frames')
    # plt.tight_layout()  # would auto-fit subplot params to fill the figure
    plt.title(name)
    plt.savefig(name+'.jpg')
    plt.show()

# PLP
# -----定义两个公式,bark_change为线性频率坐标转换为Bark坐标,equal_loudness为等响度曲线--------


def bark_change(x):
    """Map angular frequency x (rad/s) onto the Bark scale.

    Computes 6 * log10(u + sqrt(u**2 + 1)) with u = x / (1200*pi), an
    arsinh-shaped frequency warping expressed via log10.
    NOTE(review): Hermansky's PLP formulation uses the natural log here;
    this file uses log10 throughout — confirm which scale is intended.
    """
    u = x / (1200 * np.pi)
    return 6 * np.log10(u + np.sqrt(u ** 2 + 1))


def equal_loudness(x):
    """Equal-loudness weighting E(w) at angular frequency x (rad/s).

    Approximates the ear's frequency sensitivity as used in PLP:
        E(w) = ((w^2 + 56.8e6) * w^4) / ((w^2 + 6.3e6)^2 * (w^2 + 3.8e8))
    """
    w2 = x ** 2
    numerator = (w2 + 56.8e6) * x ** 4
    denominator = (w2 + 6.3e6) ** 2 * (w2 + 3.8e8)
    return numerator / denominator


if __name__ == "__main__":
    # --- Data preparation -------------------------------------------------
    wav_file = "test.wav"
    # sample_rate: wav sampling rate; signal: raw waveform samples
    sample_rate, signal = wavfile.read(wav_file)
    # Keep only the first 10 seconds of audio
    signal = signal[0: int(10 * sample_rate)]

    # 1. Pre-emphasis: y[n] = x[n] - 0.97 * x[n-1] boosts high frequencies
    pre_emphasis = 0.97
    emphasized_signal = np.append(
        signal[0], signal[1:] - pre_emphasis * signal[:-1])

    # 2. Framing: 25 ms frames with a 10 ms hop
    frame_size, frame_stride = 0.025, 0.01
    frame_length, frame_step = int(
        round(frame_size * sample_rate)), int(round(frame_stride * sample_rate))
    signal_length = len(emphasized_signal)
    num_frames = int(
        np.ceil(np.abs(signal_length - frame_length) / frame_step)) + 1
    pad_signal_length = (num_frames - 1) * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    # Zero-pad so the final frame has a full frame_length of samples
    pad_signal = np.append(emphasized_signal, z)
    # Per-frame sample indices: one row per frame
    indices = np.arange(0, frame_length).reshape(
        1, -1) + np.arange(0, num_frames * frame_step, frame_step).reshape(-1, 1)
    # frames is 2-D: each row is one frame; the short-time FFT runs per row
    frames = pad_signal[indices]
    print("shape of frames:", frames.shape)

    # 3. Hamming window
    hamming = np.hamming(frame_length)
    # equivalent: 0.54 - 0.46*np.cos(2*np.pi*np.arange(frame_length)/(frame_length-1))
    frames *= hamming

    # 4. FFT: magnitude of the one-sided spectrum
    NFFT = 512
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))

    # 5. Power spectrum
    pow_frames = ((1.0 / NFFT) * (mag_frames ** 2))

    # 6. FBank
    low_freq_mel = 0
    # Highest mel value; the largest representable frequency is fs/2
    high_freq_mel = 2595 * np.log10(1 + (sample_rate / 2) / 700)
    nfilt = 40
    # nfilt + 2 mel points: one extra point each side so every filter has a
    # left and right neighbour as its triangle edges
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)
    hz_points = 700 * (10 ** (mel_points / 2595) - 1)
    # One row per triangular mel filter, sampled at the rfft bins
    fbank = np.zeros((nfilt, int(NFFT / 2 + 1)))
    # FFT bin position of each mel centre frequency
    # (renamed from `bin`, which shadowed the builtin)
    bin_points = (hz_points / (sample_rate / 2)) * (NFFT / 2)
    for m in range(1, nfilt + 1):
        left = int(bin_points[m - 1])
        center = int(bin_points[m])
        right = int(bin_points[m + 1])
        for j in range(left, center):    # rising slope
            fbank[m - 1, j + 1] = (j + 1 - bin_points[m - 1]) / \
                (bin_points[m] - bin_points[m - 1])
        for j in range(center, right):   # falling slope
            fbank[m - 1, j + 1] = (bin_points[m + 1] - (j + 1)) / \
                (bin_points[m + 1] - bin_points[m])
    # Apply the mel filterbank
    filter_banks = np.dot(pow_frames, fbank.T)
    # Replace zeros so the log below is defined
    filter_banks = np.where(
        filter_banks == 0, np.finfo(float).eps, filter_banks)
    filter_banks = 20 * np.log10(filter_banks)  # dB
    print("shape of FBank", filter_banks.shape)
    plot_spectrogram(filter_banks.T, 'FBank')

    # 7. MFCC: DCT of the log filterbank, keeping coefficients 1..num_ceps
    num_ceps = 12  # number of cepstral coefficients to keep
    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 1:(num_ceps + 1)]
    print("shape of MFCC", mfcc.shape)
    plot_spectrogram(mfcc.T, 'MFCC')

    # 8. PLP
    N = int(NFFT / 2 + 1)  # number of one-sided rfft bins
    # BUG FIX: the bin spacing of an NFFT-point rfft is sample_rate/NFFT,
    # not sample_rate/N. The previous df = sample_rate/N stretched freq_hz
    # to roughly 0..fs, placing every bark filter at about twice its
    # intended frequency.
    df = sample_rate / NFFT          # frequency resolution of the FFT
    freq_hz = np.arange(N) * df      # physical frequency per bin: 0 -> fs/2

    freq_w = 2 * np.pi * np.array(freq_hz)  # angular frequency
    freq_bark = bark_change(freq_w)         # bark frequency

    # Critical-band centre frequencies; more than 10 bands are normally used
    # to cover the speech range — 15 centres are chosen here
    point_hz = [250, 350, 450, 570, 700, 840, 1000,
                1170, 1370, 1600, 1850, 2150, 2500, 2900, 3400]
    point_w = 2 * np.pi * np.array(point_hz)  # angular frequency
    point_bark = bark_change(point_w)         # bark frequency

    bank = np.zeros((15, N))  # 15 x N matrix: one bark filter per row
    # --- Build the bark filterbank (piecewise critical-band curve) --------
    for b in range(15):
        for k in range(N):
            omg = freq_bark[k] - point_bark[b]
            if -1.3 < omg < -0.5:
                bank[b, k] = 10 ** (2.5 * (omg + 0.5))
            elif -0.5 < omg < 0.5:
                bank[b, k] = 1
            elif 0.5 < omg < 2.5:
                bank[b, k] = 10 ** (-1.0 * (omg - 0.5))
            else:
                bank[b, k] = 0
    bark_filter_banks = np.dot(pow_frames, bank.T)

    # Equal-loudness pre-emphasis, weighted per band centre
    equal_data = equal_loudness(point_w) * bark_filter_banks

    # Intensity-to-loudness compression: cube-root approximation of the
    # nonlinear relation between sound intensity and perceived loudness
    cubic_data = equal_data ** 0.33

    # 30-point IFFT yields a 30-dim vector per frame
    plp_data = np.fft.ifft(cubic_data, 30)

    # Linear prediction: 12 LPC coefficients per frame (drop the leading 1)
    plp = np.zeros((plp_data.shape[0], 12))
    for t in range(plp_data.shape[0]):
        plp[t] = librosa.lpc(abs(plp_data[t]), order=12)[1:]
    print("shape of PLP", plp.shape)
    plot_spectrogram(plp.T, 'PLP')

FBank、MFCC、PLP实现_第2张图片 FBank、MFCC、PLP实现_第3张图片 FBank、MFCC、PLP实现_第4张图片

你可能感兴趣的:(语音识别,python,开发语言,人工智能,语音识别)