按照官网教程,kaldi的安装首先通过git获取项目,再进行编译。
# Clone the Kaldi source tree.
git clone https://github.com/kaldi-asr/kaldi.git
# Build the third-party tools first (tools/), then configure and build Kaldi itself (src/).
cd kaldi/tools/; make; cd ../src; ./configure; make
如果报错,则可能是相关的依赖项没有安装,可按照提示一步步安装(需要root权限)。
# Install build dependencies (requires root).
sudo apt-get install zlib1g-dev automake autoconf sox subversion
# Install Intel MKL, the math library used by the default ./configure setup.
sudo bash extras/install_mkl.sh
egs目录下放着各个数据库的样例代码,一个文件夹就是一个数据库,非常全面。进入egs/librispeech/s5/。每个样例里都会有一份cmd.sh(选择单机run.pl或集群queue.pl模式)、path.sh(引入各种kaldi的路径)、run.sh(训练及测试的整个主流程)。以下主要细看run.sh,整体流程为:导入参数->下载部分数据并预处理->准备并创建语言模型->提取特征->训练部分数据集->训练单音素、三音素模型并变换训练->加入更多数据集->变换训练->加入全部数据集->变换训练->解码->训练tdnn模型。具体如下:
#!/usr/bin/env bash
## Import parameters.
# Root directory where the LibriSpeech corpus is (or will be) downloaded.
data=/home/fwq/Project/kaldi/kaldi/data
# OpenSLR resources: 12 = LibriSpeech audio, 11 = language-model material.
data_url=www.openslr.org/resources/12
lm_url=www.openslr.org/resources/11
# Output directory for MFCC features.
mfccdir=mfcc
# First stage to run; raise it to resume a partially finished run.
stage=1
# cmd.sh defines $train_cmd/$decode_cmd (run.pl locally, queue.pl on a grid);
# path.sh puts the Kaldi binaries and the utils/ scripts on PATH.
. ./cmd.sh
. ./path.sh
# parse_options.sh is found via the PATH set by path.sh; it turns command-line
# options such as --stage N into variable assignments.
. parse_options.sh
# Abort on the first failing command.
set -e
## Download the data: five LibriSpeech subsets plus the LM resources.
if [ $stage -le 1 ]; then
  for part in dev-clean test-clean dev-other test-other train-clean-100; do
    local/download_and_untar.sh $data $data_url $part
  done
  # Download the pre-built language models / lexicon material into data/local/lm.
  local/download_lm.sh $lm_url data/local/lm
fi
## Prepare per-subset Kaldi data dirs (wav.scp, text, utt2spk, spk2gender, utt2dur).
if [ $stage -le 2 ]; then
  for part in dev-clean test-clean dev-other test-other train-clean-100; do
    # Map e.g. "dev-clean" -> data/dev_clean.  Bash parameter expansion
    # replaces the original `$(echo $part | sed s/-/_/g)` pipeline: same
    # result, without forking echo+sed on every iteration.
    local/data_prep.sh $data/LibriSpeech/$part data/${part//-/_}
  done
fi
## Prepare the language model: build the dictionary (local/prepare_dict.sh),
## build the lang directory (utils/prepare_lang.sh), and format the LMs
## into G.fst form (local/format_lms.sh).
if [ $stage -le 3 ]; then
  local/prepare_dict.sh --stage 3 --nj 30 --cmd "$train_cmd" \
    data/local/lm data/local/lm data/local/dict_nosp
  # The second positional argument is the OOV word; it must match the dict's
  # "<UNK>" entry.  The original post lost "<UNK>" to HTML escaping, leaving
  # an empty string, which breaks prepare_lang.sh.
  utils/prepare_lang.sh data/local/dict_nosp \
    "<UNK>" data/local/lang_tmp_nosp data/lang_nosp
  local/format_lms.sh --src-dir data/lang_nosp data/local/lm
fi
## Build ConstArpaLm-format LMs from the large 3-gram and 4-gram ARPA models;
## these are used later for lattice rescoring (stage 18).
if [ $stage -le 4 ]; then
utils/build_const_arpa_lm.sh data/local/lm/lm_tglarge.arpa.gz \
data/lang_nosp data/lang_nosp_test_tglarge
utils/build_const_arpa_lm.sh data/local/lm/lm_fglarge.arpa.gz \
data/lang_nosp data/lang_nosp_test_fglarge
fi
## Feature-extraction setup: on the JHU CLSP grid, spread the mfcc storage
## directory across several disks; elsewhere this stage is a no-op.
if [ $stage -le 5 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]]; then
    # BUG FIX: the original referenced the undefined variable $mfcc;
    # the feature directory variable defined above is $mfccdir.
    utils/create_split_dir.pl /export/b{02,11,12,13}/$USER/kaldi-data/egs/librispeech/s5/$mfccdir/storage \
      $mfccdir/storage
  fi
fi
## Extract MFCC features and CMVN statistics for every subset.
if [ $stage -le 6 ]; then
for part in dev_clean test_clean dev_other test_other train_clean_100; do
steps/make_mfcc.sh --cmd "$train_cmd" --nj 40 data/$part exp/make_mfcc/$part $mfccdir
steps/compute_cmvn_stats.sh data/$part exp/make_mfcc/$part $mfccdir
done
fi
## Carve small subsets out of the 100h set to bootstrap GMM training:
## the 2k shortest utterances (easiest to align from flat start), plus 5k/10k.
if [ $stage -le 7 ]; then
utils/subset_data_dir.sh --shortest data/train_clean_100 2000 data/train_2kshort
utils/subset_data_dir.sh data/train_clean_100 5000 data/train_5k
utils/subset_data_dir.sh data/train_clean_100 10000 data/train_10k
fi
## Train a monophone model (mono) on the 2k shortest utterances.
if [ $stage -le 8 ]; then
steps/train_mono.sh --boost-silence 1.25 --nj 20 --cmd "$train_cmd" \
data/train_2kshort data/lang_nosp exp/mono
fi
## Align with mono, then train a delta-feature triphone model (tri1) on 5k.
if [ $stage -le 9 ]; then
steps/align_si.sh --boost-silence 1.25 --nj 10 --cmd "$train_cmd" \
data/train_5k data/lang_nosp exp/mono exp/mono_ali_5k
steps/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" \
2000 10000 data/train_5k data/lang_nosp exp/mono_ali_5k exp/tri1
fi
## Align with tri1, then train an LDA+MLLT triphone model (tri2b) on 10k.
if [ $stage -le 10 ]; then
steps/align_si.sh --nj 10 --cmd "$train_cmd" \
data/train_10k data/lang_nosp exp/tri1 exp/tri1_ali_10k
steps/train_lda_mllt.sh --cmd "$train_cmd" \
--splice-opts "--left-context=3 --right-context=3" 2500 15000 \
data/train_10k data/lang_nosp exp/tri1_ali_10k exp/tri2b
fi
## Align with tri2b, then train an LDA+MLLT+SAT model (tri3b) on 10k.
if [ $stage -le 11 ]; then
steps/align_si.sh --nj 10 --cmd "$train_cmd" --use-graphs true \
data/train_10k data/lang_nosp exp/tri2b exp/tri2b_ali_10k
steps/train_sat.sh --cmd "$train_cmd" 2500 15000 \
data/train_10k data/lang_nosp exp/tri2b_ali_10k exp/tri3b
fi
## fMLLR-align the full 100h set with tri3b, then train a larger SAT model (tri4b).
if [ $stage -le 12 ]; then
steps/align_fmllr.sh --nj 20 --cmd "$train_cmd" \
data/train_clean_100 data/lang_nosp \
exp/tri3b exp/tri3b_ali_clean_100
steps/train_sat.sh --cmd "$train_cmd" 4200 40000 \
data/train_clean_100 data/lang_nosp \
exp/tri3b_ali_clean_100 exp/tri4b
fi
## Estimate pronunciation and silence probabilities from the tri4b training
## alignments, rebuild the dict/lang directories with them, and rebuild the
## test LMs on top of the new lang directory.
if [ $stage -le 13 ]; then
  steps/get_prons.sh --cmd "$train_cmd" \
    data/train_clean_100 data/lang_nosp exp/tri4b
  utils/dict_dir_add_pronprobs.sh --max-normalize true \
    data/local/dict_nosp \
    exp/tri4b/pron_counts_nowb.txt exp/tri4b/sil_counts_nowb.txt \
    exp/tri4b/pron_bigram_counts_nowb.txt data/local/dict
  # The second positional argument is the OOV word "<UNK>"; the original
  # post lost it to HTML escaping, leaving an empty string.
  utils/prepare_lang.sh data/local/dict \
    "<UNK>" data/local/lang_tmp data/lang
  local/format_lms.sh --src-dir data/lang data/local/lm
  utils/build_const_arpa_lm.sh \
    data/local/lm/lm_tglarge.arpa.gz data/lang data/lang_test_tglarge
  utils/build_const_arpa_lm.sh \
    data/local/lm/lm_fglarge.arpa.gz data/lang data/lang_test_fglarge
fi
## Legacy nnet2 recipe, deliberately disabled with "&& false"
## (superseded by the chain/TDNN recipe at the end of this script).
if [ $stage -le 14 ] && false; then
steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \
data/train_clean_100 data/lang exp/tri4b exp/tri4b_ali_clean_100
local/nnet2/run_5a_clean_100.sh
fi
## Add the train-clean-360 subset (-> 460h total): download, prepare,
## extract features, then combine with the 100h set.
if [ $stage -le 15 ]; then
local/download_and_untar.sh $data $data_url train-clean-360
local/data_prep.sh \
$data/LibriSpeech/train-clean-360 data/train_clean_360
steps/make_mfcc.sh --cmd "$train_cmd" --nj 40 data/train_clean_360 \
exp/make_mfcc/train_clean_360 $mfccdir
steps/compute_cmvn_stats.sh \
data/train_clean_360 exp/make_mfcc/train_clean_360 $mfccdir
utils/combine_data.sh \
data/train_clean_460 data/train_clean_100 data/train_clean_360
fi
## fMLLR-align the 460h set with tri4b, then train a bigger SAT model (tri5b).
if [ $stage -le 16 ]; then
steps/align_fmllr.sh --nj 40 --cmd "$train_cmd" \
data/train_clean_460 data/lang exp/tri4b exp/tri4b_ali_clean_460
steps/train_sat.sh --cmd "$train_cmd" 5000 100000 \
data/train_clean_460 data/lang exp/tri4b_ali_clean_460 exp/tri5b
fi
# Disabled legacy nnet2 recipe for the 460h set.
#local/nnet2/run_6a_clean_460.sh
## Add train-other-500 (-> 960h total): download, prepare, extract features,
## then combine with the 460h set.
if [ $stage -le 17 ]; then
local/download_and_untar.sh $data $data_url train-other-500
local/data_prep.sh \
$data/LibriSpeech/train-other-500 data/train_other_500
steps/make_mfcc.sh --cmd "$train_cmd" --nj 40 data/train_other_500 \
exp/make_mfcc/train_other_500 $mfccdir
steps/compute_cmvn_stats.sh \
data/train_other_500 exp/make_mfcc/train_other_500 $mfccdir
utils/combine_data.sh \
data/train_960 data/train_clean_460 data/train_other_500
fi
## Align the 960h set with tri5b, quick-train tri6b, build the tgsmall
## decoding graph, decode all dev/test sets, and rescore the lattices with
## the tgmed LM and the tglarge/fglarge ConstArpaLm models built earlier.
if [ $stage -le 18 ]; then
steps/align_fmllr.sh --nj 40 --cmd "$train_cmd" \
data/train_960 data/lang exp/tri5b exp/tri5b_ali_960
steps/train_quick.sh --cmd "$train_cmd" \
7000 150000 data/train_960 data/lang exp/tri5b_ali_960 exp/tri6b
utils/mkgraph.sh data/lang_test_tgsmall \
exp/tri6b exp/tri6b/graph_tgsmall
for test in test_clean test_other dev_clean dev_other; do
steps/decode_fmllr.sh --nj 20 --cmd "$decode_cmd" \
exp/tri6b/graph_tgsmall data/$test exp/tri6b/decode_tgsmall_$test
steps/lmrescore.sh --cmd "$decode_cmd" data/lang_test_{tgsmall,tgmed} \
data/$test exp/tri6b/decode_{tgsmall,tgmed}_$test
steps/lmrescore_const_arpa.sh \
--cmd "$decode_cmd" data/lang_test_{tgsmall,tglarge} \
data/$test exp/tri6b/decode_{tgsmall,tglarge}_$test
steps/lmrescore_const_arpa.sh \
--cmd "$decode_cmd" data/lang_test_{tgsmall,fglarge} \
data/$test exp/tri6b/decode_{tgsmall,fglarge}_$test
done
fi
## Data cleanup: keep well-aligned segments and retrain (tri6b_cleaned).
if [ $stage -le 19 ]; then
local/run_cleanup_segmentation.sh
fi
## Train and evaluate the chain (nnet3 TDNN) model.
if [ $stage -le 20 ]; then
local/chain/run_tdnn.sh
fi
首先,新建自己数据库的文件夹,并设置steps、utils、rnnlm的软链接。
# Symlink the shared wsj utils/steps scripts and the rnnlm scripts into the
# new experiment directory (the standard layout for a custom Kaldi recipe).
ln -s /home/fwq/Project/kaldi/kaldi/egs/wsj/s5/utils utils
ln -s /home/fwq/Project/kaldi/kaldi/egs/wsj/s5/steps steps
ln -s /home/fwq/Project/kaldi/kaldi/scripts/rnnlm rnnlm
然后开始准备自己的数据库,kaldi需要的文件如下,这部分需要根据自己的数据库格式来编写生成,放置在data/corpus_name/里,以下corpus命名为test。
# Sort/repair the data dir (consistency of wav.scp, text, utt2spk, ...),
# derive spk2utt from utt2spk, and copy the dir for high-resolution features.
utils/fix_data_dir.sh data/test
utils/utt2spk_to_spk2utt.pl data/test/utt2spk > data/test/spk2utt
for datadir in test; do
utils/copy_data_dir.sh data/$datadir data/${datadir}_hires
done
有了数据,就要准备生成mfcc特征,需要新建一个conf文件夹,并新建conf/mfcc_hires.conf的配置文件,添加如下:
# conf/mfcc_hires.conf: high-resolution (40-dim) MFCC settings.
# Fixed: the option below was written "-use-energy" (single dash), which
# Kaldi's option parser does not accept; it must be "--use-energy".
--use-energy=false # use average of log energy, not energy.
--num-mel-bins=40 # similar to Google's setup.
--num-ceps=40 # there is no dimensionality reduction.
--low-freq=20 # low cutoff frequency for mel bins... this is high-bandwidth data, so
              # there might be some information at the low end.
--high-freq=-400 # high cutoff frequency, relative to Nyquist of 8000 (=7600)
然后就可以为数据计算特征和CMVN统计信息。
# Extract hires MFCCs and CMVN stats, then re-validate the data dir.
for datadir in test; do
steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf --cmd "$train_cmd" data/${datadir}_hires
steps/compute_cmvn_stats.sh data/${datadir}_hires
utils/fix_data_dir.sh data/${datadir}_hires
done
接下来是预训练模型的下载和导入。默认情况下,内容将解压到data和exp目录。这里提供了2种语言模型:tgsmall(小的三元组n-gram模型)和rnnlm(基于LSTM),这两种语言模型都是在LibriSpeech训练集转录文本上训练的。我们将使用tgsmall模型进行解码,并使用RNNLM进行重打分(rescoring)。
# Download and unpack the pretrained LibriSpeech models:
# chain TDNN acoustic model, i-vector extractor, and language models.
# The archives extract into the local data/ and exp/ directories.
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_chain.tar.gz
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_extractor.tar.gz
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_lm.tar.gz
tar -xvzf 0013_librispeech_v1_chain.tar.gz
tar -xvzf 0013_librispeech_v1_extractor.tar.gz
tar -xvzf 0013_librispeech_v1_lm.tar.gz
使用i-vector提取器来获取测试数据的i-vector。 这会将100维i-向量提取到exp/nnet3_cleaned。
# Extract online i-vectors with the pretrained extractor; the number of
# parallel jobs is set to the number of speakers (lines in spk2utt).
for data in test; do
nspk=$(wc -l <data/${data}_hires/spk2utt)
steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj "${nspk}" \
data/${data}_hires exp/nnet3_cleaned/extractor \
exp/nnet3_cleaned/ivectors_${data}_hires
done
使用tgsmallLM创建解码图。
# Build the decoding graph for the pretrained chain model with the
# small 3-gram (tgsmall) LM; --remove-oov drops OOV arcs from the graph.
export dir=exp/chain_cleaned/tdnn_1d_sp
export graph_dir=$dir/graph_tgsmall
utils/mkgraph.sh --self-loop-scale 1.0 --remove-oov \
data/lang_test_tgsmall $dir $graph_dir
使用创建的图形进行解码。
# Decode the test set with the chain model, feeding in the online i-vectors
# extracted above; results go to $dir/decode_test_tgsmall.
export decode_cmd="run.pl"
for decode_set in test; do
steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
--nj 8 --cmd "$decode_cmd" \
--online-ivector-dir exp/nnet3_cleaned/ivectors_${decode_set}_hires \
$graph_dir data/${decode_set}_hires $dir/decode_${decode_set}_tgsmall
done
在重打分之前先检查一下WER。这里使用Kaldi自带的评分脚本(steps/score_kaldi.sh)来评分,大多数egs都使用这种方式。
# Score the decode directory with Kaldi's scoring script and print the
# best WER found across LM-weight / word-penalty combinations.
for decode_set in test; do
steps/score_kaldi.sh --cmd "run.pl" data/${decode_set}_hires $graph_dir $dir/decode_${decode_set}_tgsmall
done
cat exp/chain_cleaned/tdnn_1d_sp/decode_test_tgsmall/scoring_kaldi/best_wer
%WER 57.15 [ 14722 / 25761, 2501 ins, 2559 del, 9662 sub ] exp/chain_cleaned/tdnn_1d_sp/decode_test_tgsmall/wer_17_1.0
使用RNNLM重新评分。
# Rescore the tgsmall lattices with the pretrained LSTM RNNLM using pruned
# lattice rescoring (RNNLM interpolation weight 0.45, 4-gram approximation).
export decode_cmd="run.pl"
for decode_set in test; do
decode_dir=exp/chain_cleaned/tdnn_1d_sp/decode_${decode_set}_tgsmall;
rnnlm/lmrescore_pruned.sh \
--cmd "$decode_cmd" \
--weight 0.45 --max-ngram-order 4 \
data/lang_test_tgsmall exp/rnnlm_lstm_1a \
data/${decode_set}_hires ${decode_dir} \
exp/chain_cleaned/tdnn_1d_sp/decode_${decode_set}_rescore
done
计分包含在lmrescore_pruned.sh脚本中。
# Scoring is run inside lmrescore_pruned.sh; inspect the rescored WER.
cat exp/chain_cleaned/tdnn_1d_sp/decode_test_rescore/wer_17_1.0
# %WER 56.12 [ 14456 / 25761, 2607 ins, 2452 del, 9397 sub ]
不同于以往用的python,kaldi的初步使用通过shell脚本来实现,它基于c++的底层,通过社区不同发展,现在已经有了非常庞大的脚本库。很多函数都实现了高效的封装,但如果要自己提特征训模型的话,还需要细看shell代码进一步看c++代码。
参考1
参考2