<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>web 语音通话</title>
</head>
<body>
<audio id="audio"></audio>
</body>
<script>
// Minimal stand-in for a WebSocket used by this demo: whatever is sent is
// echoed back through `onmessage` asynchronously, mimicking a server round trip.
class MyWS {
  constructor() {
  }
  // Consumers override this to receive incoming messages.
  onmessage() { }
  // Deliver `val` back to onmessage on the next macrotask tick, the way a
  // real socket would fire its message event after the current call stack.
  send(val) {
    setTimeout(() => this.onmessage(val))
  }
}
var ws = null // signalling channel (MyWS, a WebSocket stand-in)
var record = null // active Recorder instance handling microphone capture
var audio = null // the page's <audio> element (stored by beginCall; playback itself goes through Web Audio)
// Shared playback context. Fix: use the webkit-prefixed constructor as a
// fallback (Safari), mirroring the webkitGetUserMedia fallback used below,
// and call the constructor with explicit parentheses — the original
// `new window.AudioContext` relied on new-without-arguments syntax.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)()
var analyser = audioCtx.createAnalyser() // analyser node (frequency/time-domain data)
var gainNode = audioCtx.createGain() // volume control node
// Playback tap: 8192-frame buffers, 2 channels in/out.
// NOTE(review): createScriptProcessor is deprecated in favour of AudioWorklet.
var Brecorder = audioCtx.createScriptProcessor(8192, 2, 2)
var rate = 16000 // PCM sample rate (Hz) used for both capture and playback
// Store the active Recorder in the module-level `record` so that
// useWebSocket() and closeCall() can reach it later.
function init(rec) {
record = rec
}
// Recorder: captures microphone audio from `stream`, buffers it, downsamples
// it to `rate` Hz / 16-bit little-endian PCM and streams each processed chunk
// to the server through the global `ws`.
// NOTE(review): createScriptProcessor is deprecated; an AudioWorklet would
// move processing off the main thread, but the public interface is kept.
function Recorder(stream) {
  var sampleBits = 16 // output bit depth (8 or 16)
  var sampleRate = rate // output sample rate (Hz)
  var context = new AudioContext()
  // Source node wrapping the microphone MediaStream.
  var audioInput = context.createMediaStreamSource(stream)
  // 8192-frame buffers, mono in / mono out.
  var recorder = context.createScriptProcessor(8192, 1, 1)
  var audioData = {
    size: 0, // total buffered sample count
    buffer: [], // Float32Array chunks awaiting encoding
    // Bug fix: the input rate was hard-coded to 48000, but capture runs at
    // the AudioContext's own rate (commonly 44100) — a wrong value skews the
    // decimation ratio and therefore pitch/speed of the streamed audio.
    inputSampleRate: context.sampleRate,
    inputSampleBits: 16, // capture bit depth
    outputSampleRate: sampleRate, // output sample rate
    outputSampleBits: sampleBits, // output bit depth (typo "oututSampleBits" fixed)
    // Drop all buffered audio.
    clear: function () {
      this.buffer = []
      this.size = 0
    },
    // Append one processor buffer. Copied, because the audio engine reuses
    // the underlying channel-data array between callbacks.
    input: function (data) {
      this.buffer.push(new Float32Array(data))
      this.size += data.length
    },
    // Merge the buffered chunks, then downsample by simple decimation
    // (keep every `compression`-th sample).
    compress: function () {
      var data = new Float32Array(this.size)
      var offset = 0
      for (var i = 0; i < this.buffer.length; i++) {
        data.set(this.buffer[i], offset)
        offset += this.buffer[i].length
      }
      // Bug fixes: Math.floor instead of parseInt (which stringifies the
      // number first), guard against a ratio below 1, and floor the result
      // length — a fractional length passed to the Float32Array constructor
      // throws a RangeError.
      var compression = Math.max(1, Math.floor(this.inputSampleRate / this.outputSampleRate))
      var length = Math.floor(data.length / compression)
      var result = new Float32Array(length)
      var index = 0
      var j = 0
      while (index < length) {
        result[index] = data[j]
        j += compression
        index++
      }
      return result
    },
    // Encode the downsampled floats as raw 16-bit little-endian PCM.
    // No container header is added — the server handles raw PCM.
    encodePCM: function () {
      var bits = Math.min(this.inputSampleBits, this.outputSampleBits)
      var bytes = this.compress()
      var dataLength = bytes.length * (bits / 8)
      var buffer = new ArrayBuffer(dataLength)
      var data = new DataView(buffer)
      var offset = 0
      for (var i = 0; i < bytes.length; i++, offset += 2) {
        // Clamp to [-1, 1] and scale to the signed 16-bit range.
        var s = Math.max(-1, Math.min(1, bytes[i]))
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
      }
      return new Blob([data])
    }
  }
  // Read the encoded PCM blob asynchronously and push its bytes over the socket.
  var sendData = function () {
    var reader = new FileReader()
    reader.onload = function (e) {
      ws.send(e.target.result)
    }
    reader.readAsArrayBuffer(audioData.encodePCM())
    audioData.clear() // drop data already handed to the reader
  }
  // Connect the graph; onaudioprocess starts firing once connected.
  this.start = function () {
    audioInput.connect(recorder)
    recorder.connect(context.destination)
  }
  // Detach the processor node; capture stops.
  this.stop = function () {
    recorder.disconnect()
  }
  // Current buffered audio as an encoded PCM Blob.
  this.getBlob = function () {
    return audioData.encodePCM()
  }
  // Discard any buffered audio.
  this.clear = function () {
    audioData.clear()
  }
  // Fires for every captured 8192-frame buffer: stash channel 0,
  // then immediately encode and send it.
  recorder.onaudioprocess = function (e) {
    audioData.input(e.inputBuffer.getChannelData(0))
    sendData()
  }
}
// Open the (simulated) signalling channel and start streaming microphone
// audio; every inbound message is treated as raw PCM and played back.
function useWebSocket() {
  ws = new MyWS()
  ws.onmessage = function (msg) {
    console.log(msg)
    playAuido(new Blob([msg]))
  }
  record.start()
}
// Prepend a canonical 44-byte WAV (RIFF/PCM) header to raw sample data.
//   samples       ArrayBuffer holding the raw PCM payload
//   sampleRateTmp sample rate in Hz
//   sampleBits    bits per sample: 8, 16, anything else is treated as 32
//   channelCount  number of interleaved channels
// Returns a new ArrayBuffer containing header + samples.
function addWavHeader(samples, sampleRateTmp, sampleBits, channelCount) {
  var dataLength = samples.byteLength
  var view = new DataView(new ArrayBuffer(44 + dataLength))
  // Write an ASCII tag at a fixed byte position.
  var putTag = function (pos, tag) {
    for (var k = 0; k < tag.length; k++) {
      view.setUint8(pos + k, tag.charCodeAt(k))
    }
  }
  putTag(0, 'RIFF')                        // RIFF chunk id
  view.setUint32(4, 36 + dataLength, true) // file size minus 8
  putTag(8, 'WAVE')                        // RIFF type
  putTag(12, 'fmt ')                       // format sub-chunk id
  view.setUint32(16, 16, true)             // fmt chunk byte length
  view.setUint16(20, 1, true)              // audio format: 1 = PCM
  view.setUint16(22, channelCount, true)   // channel count
  view.setUint32(24, sampleRateTmp, true)  // sample rate
  view.setUint32(28, sampleRateTmp * channelCount * (sampleBits / 8), true) // byte rate
  view.setUint16(32, channelCount * (sampleBits / 8), true)                 // block align
  view.setUint16(34, sampleBits, true)     // bits per sample
  putTag(36, 'data')                       // data sub-chunk id
  view.setUint32(40, dataLength, true)     // payload byte count
  // Copy the PCM payload after the header, little-endian.
  if (sampleBits == 8) {
    var src8 = new Int8Array(samples)
    for (var i = 0; i < src8.length; i++) {
      view.setInt8(44 + i, src8[i])
    }
  } else if (sampleBits == 16) {
    // Preserved quirk: an odd byte length leaves the data section zeroed,
    // because an Int16Array cannot view an odd-sized buffer.
    if (dataLength % 2 === 0) {
      var src16 = new Int16Array(samples)
      for (var j = 0; j < src16.length; j++) {
        view.setInt16(44 + 2 * j, src16[j], true)
      }
    }
  } else {
    var src32 = new Int32Array(samples)
    for (var m = 0; m < src32.length; m++) {
      view.setInt32(44 + 4 * m, src32[m], true)
    }
  }
  return view.buffer
}
// Play one received chunk: read the blob's bytes, prepend a WAV header so the
// raw PCM becomes decodable, then hand the decoded AudioBuffer to getBuffer().
function playAuido(blob) {
  const reader = new FileReader();
  reader.onload = (evt) => {
    const pcm = evt.target.result;
    audioCtx.decodeAudioData(addWavHeader(pcm, rate, 16, 1), getBuffer);
  };
  reader.readAsArrayBuffer(blob);
}
// Play a decoded AudioBuffer through the shared Web Audio graph:
// source -> analyser -> gainNode -> Brecorder -> destination.
function getBuffer(audioBuffer) {
// One-shot source node backed by the decoded AudioBuffer.
let source = audioCtx.createBufferSource();
source.buffer = audioBuffer;
// Feed the source into the analyser node...
source.connect(analyser);
// ...the analyser into the gain (volume) node...
analyser.connect(gainNode);
// ...and the gain node into the ScriptProcessor tap.
gainNode.connect(Brecorder);
Brecorder.connect(audioCtx.destination);
// Start playback immediately.
source.start(0);
// Pass-through tap: the input must be wired through to the output and the
// chain must terminate at audioCtx.destination, or nothing is heard.
Brecorder.onaudioprocess = function (e) {
// NOTE(review): getChannelData(1) assumes the buffer is stereo and throws
// for mono input — confirm the decoded data always has 2 channels.
let inputBuffer = e.inputBuffer,
outputBuffer = e.outputBuffer;
outputBuffer.copyToChannel(inputBuffer.getChannelData(0), 0, 0);
outputBuffer.copyToChannel(inputBuffer.getChannelData(1), 1, 0);
};
}
// Hang up: stop the active Recorder so no further audio is captured or sent.
function closeCall() {
record.stop()
}
// Start the call: request the microphone, wrap the stream in a Recorder and
// open the signalling channel. Prefers the modern promise-based
// navigator.mediaDevices.getUserMedia (the legacy navigator.getUserMedia has
// been removed from current browsers) and falls back to the callback API.
//   audioDom - the page's <audio> element, stored in the global `audio`.
function beginCall(audioDom) {
  audio = audioDom
  // Success path: shared by both the modern and the legacy API.
  var onStream = function (mediaStream) {
    init(new Recorder(mediaStream))
    console.log('开始对讲')
    useWebSocket()
  }
  // Failure path: map both legacy and modern error names to a log message.
  var onError = function (error) {
    console.log(error)
    switch (error.message || error.name) {
      case 'PERMISSION_DENIED':
      case 'PermissionDeniedError':
      case 'NotAllowedError': // modern mediaDevices name
        console.info('用户拒绝提供信息。')
        break
      case 'NOT_SUPPORTED_ERROR':
      case 'NotSupportedError':
        console.info('浏览器不支持硬件设备。')
        break
      case 'MANDATORY_UNSATISFIED_ERROR':
      case 'MandatoryUnsatisfiedError':
      case 'NotFoundError': // modern mediaDevices name
        console.info('无法发现指定的硬件设备。')
        break
      default:
        console.info('无法打开麦克风。异常信息:' + (error.code || error.name))
        break
    }
  }
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices.getUserMedia({ audio: true }).then(onStream).catch(onError)
    return
  }
  // Legacy fallback for browsers predating mediaDevices.
  navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia
  if (!navigator.getUserMedia) {
    alert('浏览器不支持音频输入')
  } else {
    navigator.getUserMedia({ audio: true }, onStream, onError)
  }
}
// Encode a JS string as an array of UTF-8 byte values.
// Bug fix: the original iterated UTF-16 code units (charCodeAt), so the
// 4-byte branch for code points >= U+10000 was unreachable — astral
// characters arrived as two surrogates and were emitted as two 3-byte
// sequences (CESU-8, invalid UTF-8). codePointAt() reassembles surrogate
// pairs so proper 4-byte UTF-8 is produced. Lone (unpaired) surrogates
// still fall into the 3-byte branch, matching the original behaviour.
function stringToByte(str) {
  var bytes = [];
  for (var i = 0; i < str.length; i++) {
    var c = str.codePointAt(i);
    if (c >= 0x010000) {
      i++; // codePointAt consumed a surrogate pair: skip the low surrogate
      bytes.push(((c >> 18) & 0x07) | 0xF0);
      bytes.push(((c >> 12) & 0x3F) | 0x80);
      bytes.push(((c >> 6) & 0x3F) | 0x80);
      bytes.push((c & 0x3F) | 0x80);
    } else if (c >= 0x000800) {
      bytes.push(((c >> 12) & 0x0F) | 0xE0);
      bytes.push(((c >> 6) & 0x3F) | 0x80);
      bytes.push((c & 0x3F) | 0x80);
    } else if (c >= 0x000080) {
      bytes.push(((c >> 6) & 0x1F) | 0xC0);
      bytes.push((c & 0x3F) | 0x80);
    } else {
      bytes.push(c & 0xFF);
    }
  }
  return bytes;
}
// Entry point: start the call against the page's <audio> element, then hang
// up automatically after 10 seconds.
beginCall(document.getElementById('audio'))
setTimeout(closeCall, 10000)
</script>
</html>