This article focuses on recording; for playback we only call the sound object's play() method.
For more detailed usage of each library, see:
Recording: https://github.com/jsierles/react-native-audio
Playback: https://github.com/zmxv/react-native-sound
Installation
yarn add react-native-audio react-native-sound
Link the native dependencies
react-native link
or
react-native link react-native-audio
react-native link react-native-sound
iOS:
Add the following to Info.plist:
<key>NSMicrophoneUsageDescription</key>
<string>This sample uses the microphone to record your speech and convert it to text.</string>
Android:
Add the recording permission to AndroidManifest.xml:
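The original post does not show the permission line itself; the standard Android permission for microphone recording, which react-native-audio requires, is:
<uses-permission android:name="android.permission.RECORD_AUDIO" />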
Imports
import {AudioRecorder, AudioUtils} from 'react-native-audio'
import Sound from 'react-native-sound'
Methods on AudioRecorder:
requestAuthorization — request recording authorization
checkAuthorizationStatus — check the current authorization status
prepareRecordingAtPath — prepare a recording at a given path
startRecording — start recording
pauseRecording — pause recording
resumeRecording — resume recording
stopRecording — stop recording
onProgress — callback fired while recording is in progress
onFinished — callback fired when recording finishes
prepareRecordingAtPath(path, options) takes two arguments:
- path: the file path to record to
- options: the recording options
The options object:
SampleRate: 44100.0, // sample rate
Channels: 2, // number of channels
AudioQuality: 'High', // quality (Low, Medium, High)
AudioEncoding: 'aac', // audio encoding (aac is supported on both iOS and Android)
OutputFormat: 'mpeg_4', // output format
MeteringEnabled: false, // enable metering
MeasurementMode: false, // measurement mode
AudioEncodingBitRate: 32000, // audio encoding bit rate
IncludeBase64: true, // include base64-encoded audio data
AudioSource: 0, // audio source
Encoding types supported on iOS: lpcm, ima4, aac, MAC3, MAC6, ulaw, alaw, mp1, mp2, alac, amr
Encoding types supported on Android: aac, aac_eld, amr_nb, amr_wb, he_aac, vorbis
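A minimal sketch of the recording flow described above, assuming authorization has already been granted (recordOnce and the '/test.aac' path are illustrative names, not part of the library API):

import {AudioRecorder, AudioUtils} from 'react-native-audio'

async function recordOnce() {
  // Prepare the recorder with a target file and a subset of the options above
  const path = AudioUtils.DocumentDirectoryPath + '/test.aac'
  AudioRecorder.prepareRecordingAtPath(path, {
    SampleRate: 44100.0,
    Channels: 2,
    AudioQuality: 'High',
    AudioEncoding: 'aac',
    AudioEncodingBitRate: 32000,
  })
  // onFinished fires after stopRecording completes
  AudioRecorder.onFinished = (data) => console.log('finished recording', data)
  await AudioRecorder.startRecording()
  // ...record for a while, then:
  await AudioRecorder.stopRecording()
}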
Properties on AudioUtils (paths where files can be stored):
* CachesDirectoryPath — caches directory path
* DocumentDirectoryPath — documents directory path
* LibraryDirectoryPath — Library directory path
* MainBundlePath — main bundle path
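For example, the recording path used in the component below is built from DocumentDirectoryPath:
const audioPath = AudioUtils.DocumentDirectoryPath + '/test.aac'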
Playing a file with Sound:
let whoosh = new Sound(path, '', (err) => {
if(err) {
return console.log(err)
}
whoosh.play(success => {
if(success) {
console.log('success - playback succeeded')
} else {
console.log('fail - playback failed')
}
})
})
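Note: on iOS, playback may also require the audio session to be in playback mode before calling play(); react-native-sound exposes Sound.setCategory for this, and whether you need it depends on your app's audio session setup:

Sound.setCategory('Playback')

The complete example below ties recording and playback together: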
import React, { Component } from 'react';
import { View, Text, StyleSheet } from 'react-native';
import Sound from 'react-native-sound';
import {AudioRecorder, AudioUtils} from 'react-native-audio';
export default class App extends Component {
constructor(props) {
super(props);
this.state = {
hasPermission: undefined, // authorization status
audioPath: AudioUtils.DocumentDirectoryPath + '/test.aac', // recording file path
recording: false, // currently recording
pause: false, // recording paused
stop: false, // recording stopped
currentTime: 0, // recording duration
};
}
componentDidMount() {
// Request authorization
AudioRecorder.requestAuthorization()
.then(isAuthor => {
console.log('authorized: ' + isAuthor)
if(!isAuthor) {
return alert('Please enable the recording permission in Settings')
}
this.setState({hasPermission: isAuthor})
this.prepareRecordingPath(this.state.audioPath);
// Recording progress
AudioRecorder.onProgress = (data) => {
this.setState({currentTime: Math.floor(data.currentTime)});
};
// Recording finished
AudioRecorder.onFinished = (data) => {
// data contains the recording data you would upload to a backend
console.log(this.state.currentTime)
console.log(data)
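// (Assumption, not stated in the original: data usually carries the recorded
// file's URL and, when IncludeBase64 is true, a base64 field with the encoded
// audio; check the react-native-audio docs for the exact shape per platform.)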
};
})
};
/**
* AudioRecorder.prepareRecordingAtPath(path, option)
* prepares a recording at the given path
* path: file path
* option: recording options
*/
prepareRecordingPath = (path) => {
const option = {
SampleRate: 44100.0, // sample rate
Channels: 2, // number of channels
AudioQuality: 'High', // quality
AudioEncoding: 'aac', // audio encoding
OutputFormat: 'mpeg_4', // output format
MeteringEnabled: false, // enable metering
MeasurementMode: false, // measurement mode
AudioEncodingBitRate: 32000, // audio encoding bit rate
IncludeBase64: true, // include base64-encoded audio data
AudioSource: 0, // audio source
}
AudioRecorder.prepareRecordingAtPath(path,option)
}
// Start recording
_record = async () => {
if(!this.state.hasPermission) {
return alert('Recording permission has not been granted')
}
if(this.state.recording) {
return alert('Already recording...')
}
if(this.state.stop) {
this.prepareRecordingPath(this.state.audioPath)
}
this.setState({recording: true,pause: false})
try {
await AudioRecorder.startRecording()
} catch (err) {
console.log(err)
}
}
// Pause recording
_pause = async () => {
if(!this.state.recording) {
return alert('Not currently recording')
}
try {
await AudioRecorder.pauseRecording()
this.setState({pause: true, recording: false})
} catch (err) {
console.log(err)
}
}
// Resume recording
_resume = async () => {
if(!this.state.pause) {
return alert('Recording is not paused')
}
try {
await AudioRecorder.resumeRecording();
this.setState({pause: false, recording: true})
} catch (err) {
console.log(err)
}
}
// Stop recording
_stop = async () => {
this.setState({stop: true, recording: false, pause: false});
try {
await AudioRecorder.stopRecording();
} catch (error) {
console.error(error);
}
}
// Play the recording
_play = async () => {
let whoosh = new Sound(this.state.audioPath, '', (err) => {
if(err) {
return console.log(err)
}
whoosh.play(success => {
if(success) {
console.log('success - playback succeeded')
} else {
console.log('fail - playback failed')
}
})
})
}
render() {
let { recording, pause, currentTime } = this.state
return (
  <View style={styles.container}>
    <Text style={styles.text} onPress={this._record}>Record</Text>
    <Text style={styles.text} onPress={this._pause}>Pause</Text>
    <Text style={styles.text} onPress={this._resume}>Resume</Text>
    <Text style={styles.text} onPress={this._stop}>Stop</Text>
    <Text style={styles.text} onPress={this._play}>Play</Text>
    <Text style={styles.text}>
      {recording ? 'Recording' : pause ? 'Paused' : 'Not started'}
    </Text>
    <Text style={styles.text}>Duration: {currentTime}</Text>
  </View>
);
}
}
}
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: 'center',
alignItems: 'center',
},
text: {
fontSize: 18,
marginVertical: 10,
}
})
Result