Copy the script below and attach it to an object in the scene (it could also be written as a singleton that does not inherit from MonoBehaviour).
StartRecord starts recording.
StopRecord stops recording; its parameter is the file name to save under, and the file is written to Application.persistentDataPath.
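For reference, this is roughly how the two methods could be driven from UI buttons. It is a minimal sketch: the RecordButtons class, the button fields, and the file name are hypothetical; only StartRecord and StopRecord come from the script itself.

using UnityEngine;
using UnityEngine.UI;

// Hypothetical driver: assign the two Buttons and the MicphoneRecord instance in the Inspector.
public class RecordButtons : MonoBehaviour
{
    public Button startButton;
    public Button stopButton;
    public MicphoneRecord recorder;

    void Awake()
    {
        startButton.onClick.AddListener(() => recorder.StartRecord());
        // The file name is illustrative; the file ends up under Application.persistentDataPath.
        stopButton.onClick.AddListener(() => recorder.StopRecord("my_record.wav"));
    }
}

The full script: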
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System;
using System.IO;
/// <summary>
/// Converting a Unity recording clip straight to bytes does not produce a valid audio file,
/// and the clip length is fixed to the full recording time.
/// So the basic flow is: when recording stops, trim the valid samples, write them out as a
/// proper audio file, then load it (WWW/UnityWebRequest) and upload it to the server
/// (a rough upload sketch follows the script).
/// </summary>
public class MicphoneRecord : MonoBehaviour
{
AudioSource _audio;
AudioSource audio
{
get
{
if (_audio == null)
{
_audio = gameObject.AddComponent<AudioSource>();
}
return _audio;
}
}
void Start()
{
string[] ms = Microphone.devices;
deviceCount = ms.Length;
if (deviceCount == 0)
{
Debug.Log("no microphone found");
}
}
const int HEADER_SIZE = 44; // size of a standard WAV header, in bytes
const int RECORD_TIME = 30; // maximum recording length, in seconds
int deviceCount;
string bitrate = "128";
const int frequency = 44100; // sample rate, in Hz
private string path_1;
public void StartRecord()
{
audio.Stop();
audio.loop = false;
audio.mute = true; // mute local monitoring while the mic feeds the AudioSource
audio.clip = Microphone.Start(null, false, RECORD_TIME, frequency); // null = default microphone
// block until the microphone actually starts delivering samples
while (!(Microphone.GetPosition(null) > 0))
{
}
audio.Play();
}
public void StopRecord(string path)
{
if (!Microphone.IsRecording(null))
{
return;
}
Microphone.End(null);
audio.Stop();
//string path = "64_3_1.wav";
// trim leading/trailing silence from the fixed-length recording buffer
audio.clip = GetTransformAudioClip(audio.clip, 0);
if (audio.clip == null)
{
return;
}
SaveMusic(path);
HttpHelper.Instance.UpLoadRecordClip(path);
}
public void PlayRecord()
{
if (Microphone.IsRecording(null))
{
return;
}
if (audio.clip == null)
{
return;
}
audio.mute = false;
audio.loop = false;
audio.Play();
}
public void SaveMusic(string filename)
{
Save(filename, audio.clip);
}
public static bool Save(string filename, AudioClip clip)
{
// make sure the file gets a .wav extension without discarding the original name
if (!filename.ToLower().EndsWith(".wav"))
{
filename += ".wav";
}
#if UNITY_STANDALONE_WIN
string filepath = filename; // saved relative to the working directory on desktop builds
#else
// iOS, Android and every other platform: save under Application.persistentDataPath
string filepath = Path.Combine(Application.persistentDataPath, filename);
#endif
Debug.Log(filepath);
// Make sure directory exists if user is saving to sub dir.
string dir = Path.GetDirectoryName(filepath);
if (!string.IsNullOrEmpty(dir))
{
Directory.CreateDirectory(dir);
}
using (FileStream fileStream = CreateEmpty(filepath))
{
ConvertAndWrite(fileStream, clip);
WriteHeader(fileStream, clip);
}
return true; // TODO: return false if there's a failure saving the file
}
static FileStream CreateEmpty(string filepath)
{
FileStream fileStream = new FileStream(filepath, FileMode.Create);
byte emptyByte = new byte();
for (int i = 0; i < HEADER_SIZE; i++) //preparing the header
{
fileStream.WriteByte(emptyByte);
}
return fileStream;
}
static void ConvertAndWrite(FileStream fileStream, AudioClip clip)
{
// samples are per channel; GetData expects room for all channels interleaved
float[] samples = new float[clip.samples * clip.channels];
clip.GetData(samples, 0);
Int16[] intData = new Int16[samples.Length];
// convert float[] samples to Int16[], then Int16[] to Byte[];
// bytesData is twice the length of samples because each Int16 sample takes 2 bytes
Byte[] bytesData = new Byte[samples.Length * 2];
int rescaleFactor = 32767; // scale [-1, 1] floats into the Int16 range
for (int i = 0; i < samples.Length; i++)
{
intData[i] = (short)(samples[i] * rescaleFactor);
Byte[] byteArr = BitConverter.GetBytes(intData[i]);
byteArr.CopyTo(bytesData, i * 2);
}
fileStream.Write(bytesData, 0, bytesData.Length);
}
static void WriteHeader(FileStream fileStream, AudioClip clip)
{
int hz = clip.frequency;
int channels = clip.channels;
int samples = clip.samples;
fileStream.Seek(0, SeekOrigin.Begin);
Byte[] riff = System.Text.Encoding.UTF8.GetBytes("RIFF");
fileStream.Write(riff, 0, 4);
Byte[] chunkSize = BitConverter.GetBytes(fileStream.Length - 8);
fileStream.Write(chunkSize, 0, 4);
Byte[] wave = System.Text.Encoding.UTF8.GetBytes("WAVE");
fileStream.Write(wave, 0, 4);
Byte[] fmt = System.Text.Encoding.UTF8.GetBytes("fmt ");
fileStream.Write(fmt, 0, 4);
Byte[] subChunk1 = BitConverter.GetBytes(16);
fileStream.Write(subChunk1, 0, 4);
UInt16 one = 1;
Byte[] audioFormat = BitConverter.GetBytes(one); // audio format 1 = PCM
fileStream.Write(audioFormat, 0, 2);
Byte[] numChannels = BitConverter.GetBytes((UInt16)channels);
fileStream.Write(numChannels, 0, 2);
Byte[] sampleRate = BitConverter.GetBytes(hz);
fileStream.Write(sampleRate, 0, 4);
Byte[] byteRate = BitConverter.GetBytes(hz * channels * 2); // byteRate = sampleRate * channels * bytesPerSample
fileStream.Write(byteRate, 0, 4);
UInt16 blockAlign = (ushort)(channels * 2);
fileStream.Write(BitConverter.GetBytes(blockAlign), 0, 2);
UInt16 bps = 16;
Byte[] bitsPerSample = BitConverter.GetBytes(bps);
fileStream.Write(bitsPerSample, 0, 2);
Byte[] datastring = System.Text.Encoding.UTF8.GetBytes("data");
fileStream.Write(datastring, 0, 4);
Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2);
fileStream.Write(subChunk2, 0, 4);
// fileStream.Close();
}
// Trims leading/trailing samples whose amplitude does not exceed "min" (an amplitude
// threshold, despite the original parameter name), so the fixed-length recording buffer
// is cut down to the part that actually contains audio.
private AudioClip GetTransformAudioClip(AudioClip srcAudioClip, float min)
{
var samples = new float[srcAudioClip.samples];
srcAudioClip.GetData(samples, 0);
return GetTransformAudioClip(new List<float>(samples), min, srcAudioClip.channels, srcAudioClip.frequency);
}
private AudioClip GetTransformAudioClip(List<float> samples, float min, int channels, int hz)
{
return GetTransformAudioClip(samples, min, channels, hz, false, false);
}
private AudioClip GetTransformAudioClip(List<float> samples, float min, int channels, int hz, bool _3D, bool stream)
{
int i;
bool isLessMin = true;
for (i = 0; i < samples.Count; i++)
{
if (Mathf.Abs(samples[i]) > min)
{
isLessMin = false;
break;
}
}
// no sample exceeded the threshold, so there is nothing worth saving
if (isLessMin)
return null;
// drop the leading silence (everything before the first sample above the threshold)
if (samples.Count > i)
{
samples.RemoveRange(0, i);
}
// find the last sample above the threshold and drop the trailing silence after it
for (i = samples.Count - 1; i > 0; i--)
{
if (Mathf.Abs(samples[i]) > min)
{
break;
}
}
if (i + 1 < samples.Count)
{
samples.RemoveRange(i + 1, samples.Count - (i + 1));
}
var clip = AudioClip.Create("TempClip", samples.Count, channels, hz, _3D, stream);
clip.SetData(samples.ToArray(), 0);
return clip;
}
}
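The script hands the saved file name to HttpHelper.Instance.UpLoadRecordClip, which is not part of this listing. As a rough sketch of what that upload step might look like (the RecordUploader class, the uploadUrl, and the form field name are all assumptions; adjust them to your own server API):

using System.Collections;
using System.IO;
using UnityEngine;
using UnityEngine.Networking;

// Hypothetical uploader: reads the saved WAV file and POSTs it as multipart form data.
public class RecordUploader : MonoBehaviour
{
    public string uploadUrl = "http://example.com/upload"; // placeholder URL

    public IEnumerator UploadWav(string fileName)
    {
        // Save() wrote the file under Application.persistentDataPath on mobile platforms.
        string fullPath = Path.Combine(Application.persistentDataPath, fileName);
        byte[] wavBytes = File.ReadAllBytes(fullPath);

        WWWForm form = new WWWForm();
        form.AddBinaryData("file", wavBytes, Path.GetFileName(fullPath), "audio/wav");

        using (UnityWebRequest request = UnityWebRequest.Post(uploadUrl, form))
        {
            yield return request.SendWebRequest();
            if (!string.IsNullOrEmpty(request.error))
            {
                Debug.LogError("upload failed: " + request.error);
            }
        }
    }
}

It would be started with StartCoroutine(UploadWav(fileName)) from wherever StopRecord is called.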