Code generated by ChatGPT 3.5. The script captures microphone audio with PyAudio, displays the waveform in real time with Matplotlib, and saves the recording to a WAV file; press Ctrl+C to stop.
import pyaudio
import numpy as np
import matplotlib.pyplot as plt
import wave
# Constants for audio input
FORMAT = pyaudio.paInt16
CHANNELS = 1 # Mono audio
RATE = 44100 # Sample rate (samples per second)
CHUNK = 4096 # Number of frames per buffer
WAVE_OUTPUT_FILENAME = 'audio_output.wav'
# Initialize PyAudio
audio = pyaudio.PyAudio()
# Open a stream to capture audio from the microphone
stream = audio.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
# Initialize the plot for real-time waveform display
plt.ion() # Turn on interactive mode for real-time updating
fig, ax = plt.subplots()
x = np.arange(0, CHUNK)
line, = ax.plot(x, np.zeros(CHUNK))
ax.set_xlim(0, CHUNK)
ax.set_ylim(-32768, 32767) # Assuming 16-bit audio
# Create a wave file to save the audio
wave_output_file = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wave_output_file.setnchannels(CHANNELS)
wave_output_file.setsampwidth(audio.get_sample_size(FORMAT))
wave_output_file.setframerate(RATE)
# Function to update the plot with the latest audio data
def update_plot(data):
    line.set_ydata(data)
    fig.canvas.draw()
    fig.canvas.flush_events()
# Function to continuously capture and display audio
def display_audio_waveform():
    while True:
        try:
            # Read raw bytes without raising on input overflow (plot updates can lag behind the audio buffer)
            raw_data = stream.read(CHUNK, exception_on_overflow=False)
            audio_data = np.frombuffer(raw_data, dtype=np.int16)
            update_plot(audio_data)
            wave_output_file.writeframes(raw_data)
        except KeyboardInterrupt:
            break
# Start displaying the audio waveform
display_audio_waveform()
# Close the audio stream and terminate PyAudio
stream.stop_stream()
stream.close()
audio.terminate()
# Close the wave file
wave_output_file.close()
print('Audio saved to', WAVE_OUTPUT_FILENAME)
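As a quick sanity check (not part of the generated script above), the saved file can be read back and plotted. This is a minimal sketch that assumes the default filename audio_output.wav and the 16-bit mono format used above.

import wave
import numpy as np
import matplotlib.pyplot as plt

# Open the recording written by the script above (filename assumed from WAVE_OUTPUT_FILENAME)
with wave.open('audio_output.wav', 'rb') as wf:
    rate = wf.getframerate()
    samples = np.frombuffer(wf.readframes(wf.getnframes()), dtype=np.int16)

# Plot the full recording against time in seconds
t = np.arange(len(samples)) / rate
plt.plot(t, samples)
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.title('Recorded waveform')
plt.show()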