VAD Speech Segmentation: Algorithm Principles and Simple Implementations

0 Introduction

VAD, short for Voice Activity Detection, is a voice endpoint detection technique. Its main task is to accurately locate the start and end points of speech within a noisy recording.
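The simple detectors below rely on two classic frame-level features: the short-time peak amplitude and the zero-crossing rate (ZCR). Voiced speech tends to combine high amplitude with a relatively low ZCR, while silence is low-amplitude and broadband noise crosses zero frequently. A minimal, vectorized sketch of both features (the 2000-sample random frame is hypothetical test data):

import numpy as np

def frame_features(frame):
    """Return (peak amplitude, zero-crossing rate per sample) for an int16 frame."""
    peak = np.max(np.abs(frame))
    signs = frame >= 0                                   # sign of every sample
    zcr = np.count_nonzero(signs[1:] != signs[:-1]) / len(frame)
    return peak, zcr

# Hypothetical test frame: 2000 samples of low-level noise.
frame = (np.random.randn(2000) * 100).astype(np.int16)
print(frame_features(frame))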

1 A simple self-written implementation: real-time detection and segmentation of recordings based on peak amplitude and zero-crossing rate, with live microphone input

#coding=utf-8
""" Simple VAD: detect a speech segment from a live microphone using
peak amplitude and zero-crossing rate, then save it to a WAV file.
"""

import argparse
import wave

import numpy as np
import pyaudio

CHUNK = 2000                    # samples per read: 125 ms at 16 kHz
FORMAT = pyaudio.paInt16        # 16-bit signed PCM
CHANNELS = 1
RATE = 16000
WAVE_OUTPUT_FILENAME = "cache.wav"

p = pyaudio.PyAudio()

def start_stream(device_index):  # open the microphone input stream
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK,
                    input_device_index=device_index
                    )
    return stream

# Helper: sign of a sample, 1 for non-negative values, 0 for negative ones.
def sgn(data):
    if data >= 0:
        return 1
    else:
        return 0

# Count zero crossings: the number of sign changes between adjacent samples.
def calZeroCrossingRate(wave_data):
    count = 0
    for i in range(1, len(wave_data)):  # start at 1; wave_data[-1] would wrap around
        count = count + np.abs(sgn(wave_data[i]) - sgn(wave_data[i - 1]))
    return count


def Monitor(stream: pyaudio.Stream):

    frames = []
    # Wait for speech: high peak amplitude combined with a low zero-crossing rate.
    while True:
        data = stream.read(CHUNK, exception_on_overflow=False)
        audio_data = np.frombuffer(data, dtype=np.int16)
        temp = np.max(np.abs(audio_data))
        zerorate = calZeroCrossingRate(audio_data) / CHUNK
        print('current peak amplitude:', temp)

        if temp > 15000 and zerorate < 0.1:
            print("Voice detected! Start buffering the recording")
            frames.append(data)
            break
    # Keep buffering until the signal drops below the silence threshold.
    while True:
        data = stream.read(CHUNK, exception_on_overflow=False)
        audio_data = np.frombuffer(data, dtype=np.int16)
        frames.append(data)
        temp = np.max(np.abs(audio_data))
        print('current peak amplitude:', temp)
        if temp < 2000:
            break
    print("Recording finished!")
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()

def main():

    parser = argparse.ArgumentParser(description="Real-time VAD demo: segment speech from a live microphone.")
    parser.add_argument('--micId', required=True, type=int, help="Input device index of the microphone.")
    args = parser.parse_args()
    stream = start_stream(args.micId)
    while True:
        Monitor(stream)

if __name__ == '__main__':
    main()
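To find the right value for --micId, pyaudio can enumerate the audio devices on the machine. A small helper sketch using the standard get_device_count / get_device_info_by_index calls, printing only devices that can record:

import pyaudio

p = pyaudio.PyAudio()
for i in range(p.get_device_count()):
    info = p.get_device_info_by_index(i)
    if info.get('maxInputChannels', 0) > 0:  # input-capable devices only
        print(i, info['name'])
p.terminate()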

2 WAV file input

#coding=utf-8
""" Simple VAD: scan a WAV file frame by frame and print the time
offsets of frames classified as speech.
"""

import wave

import numpy as np

# Helper: sign of a sample, 1 for non-negative values, 0 for negative ones.
def sgn(data):
    if data >= 0:
        return 1
    else:
        return 0

# Count zero crossings: the number of sign changes between adjacent samples.
def calZeroCrossingRate(wave_data):
    count = 0
    for i in range(1, len(wave_data)):  # start at 1; wave_data[-1] would wrap around
        count = count + np.abs(sgn(wave_data[i]) - sgn(wave_data[i - 1]))
    return count


def main():

    wavread = wave.open(r'/home/sunshine/桌面/code_C_PY_2022/py/8.pyaudio看麦wave录音/nr降噪.wav', 'rb')
    fs = wavread.getframerate()  # sampling frequency
    print("lg------getframerate", type(fs), fs)
    Nwavlen = wavread.getnframes()  # total number of audio frames
    print("lg------getnframes", type(Nwavlen), Nwavlen)
    Nchannel = wavread.getnchannels()  # number of channels
    print("lg------Nchannel", type(Nchannel), Nchannel)
    wav_str = wavread.readframes(Nwavlen)  # the core read: raw PCM bytes

    wav_int = np.frombuffer(wav_str, dtype=np.int16)  # reinterpret bytes as int16 samples
    print(wav_int.shape)

    print(np.max(wav_int))
    FRAME = 800  # 50 ms per frame at 16 kHz
    for i in range(0, wav_int.size, FRAME):
        frame = wav_int[i:i + FRAME]
        # Normalize the zero-crossing count by the frame length; the 0.15
        # threshold may need re-tuning for your recordings.
        if np.max(np.abs(frame)) > 1000 and calZeroCrossingRate(frame) / len(frame) < 0.15:
            print("speech frame at", i / fs, 's')

if __name__ == '__main__':
    main()
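The loop above prints one line per voiced frame. In practice it is usually more useful to merge consecutive voiced frames into (start, end) segments; a hedged sketch (the flags list, frame length, and sample rate below are hypothetical stand-ins for the values computed in the loop):

def flags_to_segments(frame_flags, frame_len, fs):
    """Merge consecutive voiced-frame flags into (start_s, end_s) segments."""
    segments = []
    start = None
    for idx, voiced in enumerate(frame_flags):
        if voiced and start is None:
            start = idx                    # a segment opens on the first voiced frame
        elif not voiced and start is not None:
            segments.append((start * frame_len / fs, idx * frame_len / fs))
            start = None
    if start is not None:                  # segment still open at end of file
        segments.append((start * frame_len / fs, len(frame_flags) * frame_len / fs))
    return segments

# Hypothetical flags: two segments, 0.00-0.10 s and 0.20-0.25 s at 16 kHz.
print(flags_to_segments([1, 1, 0, 0, 1], 800, 16000))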

3 Based on the WebRTC VAD algorithm

'''
Requirements:
+ pyaudio - `pip install pyaudio`
+ py-webrtcvad - `pip install webrtcvad`

'''
import webrtcvad
import collections
import sys
import signal
import pyaudio

from array import array
from struct import pack
import wave
import time

FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK_DURATION_MS = 30       # supports 10, 20 and 30 (ms)
PADDING_DURATION_MS = 1500   # 1.5 s padding for the endpoint judgement
CHUNK_SIZE = int(RATE * CHUNK_DURATION_MS / 1000)  # samples per chunk
CHUNK_BYTES = CHUNK_SIZE * 2  # 16 bit = 2 bytes, PCM
NUM_PADDING_CHUNKS = int(PADDING_DURATION_MS / CHUNK_DURATION_MS)
NUM_WINDOW_CHUNKS = int(400 / CHUNK_DURATION_MS)  # 400 ms window / 30 ms chunks
NUM_WINDOW_CHUNKS_END = NUM_WINDOW_CHUNKS * 2

START_OFFSET = int(NUM_WINDOW_CHUNKS * CHUNK_DURATION_MS * 0.5 * RATE)

vad = webrtcvad.Vad(1)  # aggressiveness mode: 0 (least) to 3 (most aggressive)

pa = pyaudio.PyAudio()
stream = pa.open(format=FORMAT,
                 channels=CHANNELS,
                 rate=RATE,
                 input=True,
                 start=False,
                 input_device_index=6,  # microphone device index
                 frames_per_buffer=CHUNK_SIZE)


got_a_sentence = False
leave = False


def handle_int(sig, frame):
    global leave, got_a_sentence
    leave = True
    got_a_sentence = True


def record_to_file(path, data, sample_width):
    "Records from the microphone and outputs the resulting data to 'path'"
    # sample_width, data = record()
    data = pack('<' + ('h' * len(data)), *data)
    wf = wave.open(path, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(sample_width)
    wf.setframerate(RATE)
    wf.writeframes(data)
    wf.close()


def normalize(snd_data):
    "Scale the samples so the peak amplitude reaches MAXIMUM"
    MAXIMUM = 32767  # 16384
    times = float(MAXIMUM) / max(abs(i) for i in snd_data)
    r = array('h')
    for i in snd_data:
        r.append(int(i * times))
    return r

signal.signal(signal.SIGINT, handle_int)

while not leave:
    ring_buffer = collections.deque(maxlen=NUM_PADDING_CHUNKS)
    triggered = False
    voiced_frames = []
    ring_buffer_flags = [0] * NUM_WINDOW_CHUNKS
    ring_buffer_index = 0

    ring_buffer_flags_end = [0] * NUM_WINDOW_CHUNKS_END
    ring_buffer_index_end = 0
    # WangS
    raw_data = array('h')
    index = 0
    start_point = 0
    StartTime = time.time()
    print("* recording: ")
    stream.start_stream()

    while not got_a_sentence and not leave:
        chunk = stream.read(CHUNK_SIZE)
        # add WangS
        raw_data.extend(array('h', chunk))
        index += CHUNK_SIZE
        TimeUse = time.time() - StartTime

        active = vad.is_speech(chunk, RATE)

        sys.stdout.write('1' if active else '_')
        ring_buffer_flags[ring_buffer_index] = 1 if active else 0
        ring_buffer_index += 1
        ring_buffer_index %= NUM_WINDOW_CHUNKS

        ring_buffer_flags_end[ring_buffer_index_end] = 1 if active else 0
        ring_buffer_index_end += 1
        ring_buffer_index_end %= NUM_WINDOW_CHUNKS_END

        # start point detection
        if not triggered:
            ring_buffer.append(chunk)
            num_voiced = sum(ring_buffer_flags)
            if num_voiced > 0.8 * NUM_WINDOW_CHUNKS:
                sys.stdout.write(' Open ')
                triggered = True
                start_point = index - CHUNK_SIZE * 20  # roll back 20 chunks (600 ms) to keep the speech onset
                # voiced_frames.extend(ring_buffer)
                ring_buffer.clear()
        # end point detection
        else:
            # voiced_frames.append(chunk)
            ring_buffer.append(chunk)
            num_unvoiced = NUM_WINDOW_CHUNKS_END - sum(ring_buffer_flags_end)
            if num_unvoiced > 0.90 * NUM_WINDOW_CHUNKS_END or TimeUse > 10:  # silence-dominated window, or 10 s cap
                sys.stdout.write(' Close ')
                triggered = False
                got_a_sentence = True

        sys.stdout.flush()

    sys.stdout.write('\n')
    # data = b''.join(voiced_frames)

    stream.stop_stream()
    print("* done recording")
    got_a_sentence = False

    # Write to file: trim everything before start_point (reverse, pop the tail, reverse back).
    raw_data.reverse()
    for index in range(start_point):
        raw_data.pop()
    raw_data.reverse()
    raw_data = normalize(raw_data)
    record_to_file("recording.wav", raw_data, 2)
    leave = True

stream.close()
