代码实现
class AudioTools {
  private actx: AudioContext

  constructor() {
    // Older Safari only exposes the vendor-prefixed constructor.
    const Ctx = window.AudioContext || (window as any).webkitAudioContext;
    this.actx = new Ctx()
  }

  /**
   * Runs `stream` through an analyser graph and reports its volume.
   *
   * Returns a NEW MediaStream taken from a dedicated destination node rather
   * than the raw input: the raw stream does not pass through the analysis
   * graph, so recording it directly can lose the processed audio.
   *
   * NOTE(review): despite the parameter name, `volumeDB` is the RMS of the
   * byte frequency data (0–255 range), not actual decibels — callers
   * threshold on this raw value, so the scale is kept unchanged.
   *
   * NOTE(review): ScriptProcessorNode is deprecated; migrating to
   * AudioWorkletNode is recommended when feasible.
   *
   * @param stream source media stream to analyse
   * @param fn     called once per processing quantum with the volume value
   *               and the raw input AudioBuffer
   * @returns      the processed MediaStream (record this, not the input)
   */
  public volumeDetection(stream: MediaStream, fn: (volumeDB: number, audioBuffer: AudioBuffer) => void) {
    const source = this.actx.createMediaStreamSource(stream)
    const scriptProcessor = this.actx.createScriptProcessor(4096, 1, 1)
    const analyser = this.actx.createAnalyser()
    /** Dedicated destination so the returned stream has passed through the graph. **/
    const dest = this.actx.createMediaStreamDestination()
    analyser.smoothingTimeConstant = 0.8
    analyser.fftSize = 2048
    /** Wire up: source -> analyser -> scriptProcessor -> dest **/
    source.connect(analyser)
    analyser.connect(scriptProcessor)
    /** Without a MediaStreamDestination the default destination must be used instead: **/
    // scriptProcessor.connect(this.actx.destination)
    scriptProcessor.connect(dest)
    // The 'audioprocess' event always delivers an AudioProcessingEvent;
    // the original `AudioWorklet | AudioProcessingEvent` union only forced casts.
    const audioprocess = (evt: AudioProcessingEvent) => {
      const inputBuffer = evt.inputBuffer
      const outputBuffer = evt.outputBuffer
      /** Input must be copied to output, otherwise the output buffer stays silent. **/
      // FIX: the original looped to `ibuf.byteLength` (4x the sample count for a
      // Float32Array), over-running the buffer; bulk-copy the samples instead.
      for (let channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
        if (channel < inputBuffer.numberOfChannels) {
          outputBuffer.getChannelData(channel).set(inputBuffer.getChannelData(channel))
        }
      }
      const frequency = new Uint8Array(analyser.frequencyBinCount)
      analyser.getByteFrequencyData(frequency)
      let sum = 0
      for (const v of frequency) sum += v * v
      // RMS over the byte frequency bins; see the method doc for the scale caveat.
      const volumeDB = Math.sqrt(sum / frequency.length)
      fn(volumeDB, inputBuffer)
    }
    scriptProcessor.addEventListener('audioprocess', audioprocess)
    return dest.stream
  }

  /**
   * Concatenates the given AudioBuffers into one buffer and plays it into a
   * fresh MediaStream, which is returned.
   *
   * Buffers with fewer channels than the widest input are padded with
   * silence so all channels stay the same length. The output uses the
   * highest sample rate among the inputs.
   *
   * NOTE(review): the name's "Form" is a typo for "From", kept because the
   * method is public and callers may depend on it.
   *
   * @param audioBuffers buffers to concatenate, in playback order
   * @returns the resulting MediaStream, or undefined when no buffers given
   */
  public createStreamFormAudioBuffers(...audioBuffers: AudioBuffer[]) {
    if (!audioBuffers.length) return
    const dest = this.actx.createMediaStreamDestination()
    const source = this.actx.createBufferSource()
    // Widest channel count / highest sample rate among the inputs.
    let numOfChannels = 0
    let sampleRate = 0
    for (const ab of audioBuffers) {
      numOfChannels = Math.max(ab.numberOfChannels, numOfChannels)
      sampleRate = Math.max(ab.sampleRate, sampleRate)
    }
    const channels: number[][] = Array.from({ length: numOfChannels }, () => [])
    for (const ab of audioBuffers) {
      for (let i = 0; i < numOfChannels; i++) {
        // FIX: the original padded missing channels with `byteLength` zeros
        // (4x the sample count for Float32Array), desynchronising channels;
        // pad with the buffer's frame count instead. Also dropped a duplicated
        // getChannelData(i) call.
        const data = i < ab.numberOfChannels
          ? ab.getChannelData(i)
          : new Float32Array(ab.length)
        channels[i] = channels[i].concat(Array.from(data))
      }
    }
    const buffer = this.actx.createBuffer(numOfChannels, channels[0].length, sampleRate);
    for (let channel = 0; channel < buffer.numberOfChannels; channel++) {
      const out = buffer.getChannelData(channel)
      for (let i = 0; i < buffer.length; i++) {
        out[i] = channels[channel][i]
      }
    }
    source.buffer = buffer
    source.connect(dest)
    source.start()
    return dest.stream
  }
}
使用示例
/**
 * Watches `originStream` for speech and saves each spoken segment.
 *
 * Recording starts when the volume rises above the speech threshold and
 * stops after 3 seconds of silence; each finished segment is persisted via
 * `this.saveAudio`. Clips smaller than ~1 second of data at the target
 * bitrate are discarded as noise.
 *
 * @param originStream raw inbound audio stream
 */
private async inboundVoiceSaver(originStream: MediaStream) {
  const bits = 48000                       // target audio bitrate (bits/second)
  const chunks: Blob[] = []                // pending data for the current clip
  let recorder: MediaRecorder | undefined
  let timer: ReturnType<typeof setTimeout> | undefined
  const stream = MediaStudio.audioTools().volumeDetection(originStream, (db) => {
    /** Human speech is generally above ~35 on this volume scale. **/
    if (db > 40) {
      // Every loud frame re-arms the 3s silence timer; when it fires the
      // stream has been quiet for 3s, so finish the current clip.
      if (timer) clearTimeout(timer)
      timer = setTimeout(() => {
        if (recorder?.state === 'recording') recorder.stop();
      }, 3 * 1000)
      if (recorder?.state !== 'recording') recorder?.start(1000)
    }
  })
  recorder = MediaStudio.mediaRecorder(stream, {
    audioBitsPerSecond: bits,
  })
  if (!recorder) return
  recorder.ondataavailable = (e) => {
    chunks.push(e.data);
  }
  recorder.onerror = () => {
    // FIX: throw an Error object, not a bare string.
    throw new Error('save_failed')
  }
  recorder.onstop = async () => {
    const blob = new Blob(chunks, { 'type': recorder!.mimeType })
    chunks.splice(0, chunks.length);
    // Discard clips shorter than ~1 second of data at the target bitrate.
    if (blob.size * 8 < bits) return
    if (!this.groupIndex) return
    /** Persist the finished audio segment. **/
    await this.saveAudio(this.groupIndex, blob);
  }
}
参考资料