Because of the pandemic, a relative of mine who teaches had to start giving her classes online in front of a computer, instantly promoted to small-time streamer~
All of her lesson-prep material comes from the open classes published by the local education bureau. To study how those classes are taught, she had been playing the videos back at reduced speed and typing out every single character by hand. As the family's resident 'computer expert', I was pushed on stage to perform, so let's see how to automatically convert the speech in a video into text.
Automatic speech-to-text for a video takes two steps (a rough sketch of the whole flow follows the list):
First, extract the audio stream from the video.
Second, run the audio stream through a speech recognition model.
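Before getting into the details, here is the overall shape of the pipeline in a few lines. The helper names and file names are my own placeholders (the rest of the post fills them in concretely), and calling close() assumes moviepy 1.0+:

def extract_audio(video_path, wav_path):
    # Step 1: pull the audio track out of the video and save it as a wav
    from moviepy.editor import VideoFileClip
    clip = VideoFileClip(video_path)
    clip.audio.write_audiofile(wav_path)
    clip.close()  # release the ffmpeg reader (moviepy 1.0+)

def recognize(wav_path):
    # Step 2: send the audio to a speech recognition service and return the text;
    # implemented later in this post with the Baidu Cloud short-speech API
    raise NotImplementedError

# extract_audio('lesson.mp4', 'lesson.wav')
# text = recognize('lesson.wav')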
Language: Python 3.6+
Video format: MP4
I use the Python library moviepy (link: https://pypi.org/project/moviepy).
Installation: pip install moviepy
Purpose: extract the audio stream from an MP4 video and save it as a WAV file.
Usage:
from moviepy.editor import VideoFileClip
import os

file_name = 'SVID_20200429_151304_1.mp4'
dir_name = file_name.replace('.mp4', '')
cmd1 = 'mkdir /Users/user/Documents/{}'.format(dir_name)
os.system(cmd1)  # create a new directory for each new video
video = VideoFileClip('/Users/user/Documents/{}.mp4'.format(dir_name))
audio = video.audio
audio.write_audiofile('/Users/user/Documents/{}.wav'.format(dir_name))  # the actual extraction call
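As a side note, and as an optional variation rather than the method used in this post: write_audiofile also accepts fps and ffmpeg_params arguments, so in principle moviepy can resample to 16 kHz mono while writing (the _16k file name below is just my own naming):

# Optional variation (not used below): resample while writing so the wav is
# already 16 kHz mono; fps sets the sample rate, '-ac 1' forces one channel.
audio.write_audiofile('/Users/user/Documents/{}_16k.wav'.format(dir_name),
                      fps=16000,
                      ffmpeg_params=['-ac', '1'])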
I tried training a speech recognition model myself, but with open-source data the results were mediocre at best, so I went with Baidu Cloud's speech recognition API instead. At the time of writing (May 2020), simply registering gets you 50,000 free calls to the short-speech recognition service (audio clips of at most 60 seconds).
Registration: https://login.bce.baidu.com/?redirect=http%3A%2F%2Fcloud.baidu.com%2Fcampaign%2FPromotionApr%2Findex.html
After registering, remember to create an application and note down its API_KEY and SECRET_KEY; they are the credentials you use when calling the API.
For the calling script you can start from Baidu Cloud's demo on GitHub. Note that Baidu's short-speech recognition only accepts mono audio sampled at 16000 Hz, so the audio has to be converted first with ffmpeg (on macOS it can be installed directly with Homebrew).
The conversion command: ffmpeg -y -i wavfile_name -acodec pcm_s16le -f s16le -ac 1 -ar 16000 pcmfile_name
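In the full script below I simply shell out with os.system, but if you want conversion failures to surface as exceptions, a small wrapper like this (to_baidu_pcm is just my own helper name) runs the same ffmpeg command through subprocess:

import subprocess

def to_baidu_pcm(wav_path, pcm_path):
    # Convert a wav file to the 16 kHz, mono, 16-bit little-endian pcm
    # that Baidu's short-speech API expects.
    cmd = ['ffmpeg', '-y', '-i', wav_path,
           '-acodec', 'pcm_s16le', '-f', 's16le', '-ac', '1', '-ar', '16000',
           pcm_path]
    subprocess.run(cmd, check=True)  # raises CalledProcessError if ffmpeg fails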
The complete example code (adapted from Baidu's demo):
# coding=utf-8
import sys
import json
import base64
import time
import os
from scipy.io import wavfile

IS_PY3 = sys.version_info.major == 3

if IS_PY3:
    from urllib.request import urlopen
    from urllib.request import Request
    from urllib.error import URLError
    from urllib.parse import urlencode
    timer = time.perf_counter
else:
    from urllib2 import urlopen
    from urllib2 import Request
    from urllib2 import URLError
    from urllib import urlencode
    if sys.platform == "win32":
        timer = time.clock
    else:
        # On most other platforms the best timer is time.time()
        timer = time.time
API_KEY = 'your api key'
SECRET_KEY = 'your secret key'

# Audio file to recognise
AUDIO_FILE = '/Users/user/Documents/test2.pcm'  # only pcm/wav/amr are supported; the Pro version also accepts m4a
# File format
FORMAT = AUDIO_FILE[-3:]  # file extension, one of pcm/wav/amr; the Pro version also accepts m4a
CUID = '123456PYTHON'
# Sample rate
RATE = 16000  # fixed value
# Standard version
DEV_PID = 1537  # 1537 = Mandarin, input-method model; see the docs for other languages and models
ASR_URL = 'http://vop.baidu.com/server_api'
SCOPE = 'audio_voice_assistant_get'  # this scope grants ASR access; if it is missing, enable it in the console (very old apps may lack it)

# To test a custom-trained model, uncomment the lines below. Once your model is live, the console shows
# "Step 2: model parameters pid:8001, modelid:1234"; set dev_pid and lm_id accordingly.
# DEV_PID = 8001
# LM_ID = 1234

# Pro (extreme-speed) version: fill in your own API key/secret and enable the Pro version in the console (it may incur charges)
# DEV_PID = 80001
# ASR_URL = 'http://vop.baidu.com/pro_api'
# SCOPE = 'brain_enhanced_asr'  # this scope grants Pro ASR access; if missing, enable the Pro version in the console

# Skip the scope check; very old apps may not have a scope
# SCOPE = False
class DemoError(Exception):
    pass


""" TOKEN start """

TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token'


def fetch_token():
    """Exchange API_KEY / SECRET_KEY for an access token."""
    params = {'grant_type': 'client_credentials',
              'client_id': API_KEY,
              'client_secret': SECRET_KEY}
    post_data = urlencode(params)
    if IS_PY3:
        post_data = post_data.encode('utf-8')
    req = Request(TOKEN_URL, post_data)
    try:
        f = urlopen(req)
        result_str = f.read()
    except URLError as err:
        print('token http response http code : ' + str(err.code))
        result_str = err.read()
    if IS_PY3:
        result_str = result_str.decode()

    print(result_str)
    result = json.loads(result_str)
    print(result)
    if 'access_token' in result and 'scope' in result:
        print(SCOPE)
        if SCOPE and (SCOPE not in result['scope'].split(' ')):  # SCOPE = False skips this check
            raise DemoError('scope is not correct')
        print('SUCCESS WITH TOKEN: %s EXPIRES IN SECONDS: %s' % (result['access_token'], result['expires_in']))
        return result['access_token']
    else:
        raise DemoError('MAYBE API_KEY or SECRET_KEY not correct: access_token or scope not found in token response')

""" TOKEN end """
if __name__ == '__main__':
    token = fetch_token()

    # Step 1: extract the audio track from the video (same as the snippet above)
    from moviepy.editor import VideoFileClip
    file_name = 'SVID_20200429_151304_1.mp4'
    dir_name = file_name.replace('.mp4', '')
    cmd1 = 'mkdir /Users/user/Documents/{}'.format(dir_name)
    os.system(cmd1)  # a new directory per video to hold the chunks and the result
    video = VideoFileClip('/Users/user/Documents/{}.mp4'.format(dir_name))
    audio = video.audio
    audio.write_audiofile('/Users/user/Documents/{}.wav'.format(dir_name))

    # Step 2: read the wav back and send it to the API in chunks
    sample_rate, samples = wavfile.read('/Users/user/Documents/{}.wav'.format(dir_name))
    start_sec = 10   # second to start recognition at
    end_sec = 1777   # second to stop recognition at
    intern = 55      # chunk length per API call; must stay under 60 s, so 55 s to be safe
    with open("/Users/user/Documents/{}/result.txt".format(dir_name), "w") as of:
        for i in range(start_sec, end_sec, intern):
            j = min(end_sec, i + intern)
            wavfile_name = '/Users/user/Documents/{}/test_{}_{}.wav'.format(dir_name, i, j)
            pcmfile_name = '/Users/user/Documents/{}/test_{}_{}.pcm'.format(dir_name, i, j)
            # cut the chunk covering seconds [i, j) out of the full recording
            wavfile.write(wavfile_name, sample_rate, samples[i * sample_rate:j * sample_rate])
            # convert it to the 16 kHz mono pcm that the API expects
            cmdline = "ffmpeg -y -i {} -acodec pcm_s16le -f s16le -ac 1 -ar 16000 {}".format(wavfile_name, pcmfile_name)
            ret = os.system(cmdline)
            print('code: ', ret)

            with open(pcmfile_name, 'rb') as speech_file:
                speech_data = speech_file.read()
            length = len(speech_data)
            if length == 0:
                raise DemoError('file %s length read 0 bytes' % pcmfile_name)
            speech = base64.b64encode(speech_data)
            if IS_PY3:
                speech = str(speech, 'utf-8')
            params = {'dev_pid': DEV_PID,
                      # "lm_id": LM_ID,  # enable when testing a custom-trained model
                      'format': FORMAT,
                      'rate': RATE,
                      'token': token,
                      'cuid': CUID,
                      'channel': 1,
                      'speech': speech,
                      'len': length
                      }
            post_data = json.dumps(params, sort_keys=False)
            req = Request(ASR_URL, post_data.encode('utf-8'))
            req.add_header('Content-Type', 'application/json')
            try:
                begin = timer()
                f = urlopen(req)
                result_str = f.read()
                print("Request time cost %f" % (timer() - begin))
            except URLError as err:
                print('asr http response http code : ' + str(err.code))
                result_str = err.read()
            if IS_PY3:
                result_str = str(result_str, 'utf-8')
            print(result_str)
            dd = json.loads(result_str)
            # 'result' is only present on success; err_no / err_msg describe failures
            if 'result' in dd:
                of.write(dd['result'][0] + '\n')
            else:
                print('chunk {}-{}s failed: {}'.format(i, j, dd))
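When the script finishes, result.txt in the video's directory contains one line of recognised text per 55-second chunk; stitching those lines together gives the transcript of the whole lesson, which beats pausing the video and typing every character by hand.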