
Javascript: how to record and play back (HTML5 audio) in the browser


I am building a simple HTML component that lets a user record a message and then immediately play it back. The intent is that they can discard the recording before saving it if they don't like it. The messages will be under 60 seconds.

So I have a UI that lets me record audio into a set of audio buffers. I am now trying to push those buffers back into a second audio control so the user can play back the recording.


Here is the core playback function, which I believe is the heart of the problem. The solution has to work in the browser on both Android and iOS, without third-party libraries (which makes life rather hard :)

The key problem is that no audio plays, even though the player control shows the time progressing.


I am also unsure whether I should be using two players (one for recording and one for playback), or whether I can get away with a single audio element.
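For reference, this is roughly the markup I have in mind. The element ids are just placeholders, and I am creating it from JavaScript here only to keep the snippet self-contained:

// Placeholder markup: a single audio element used purely for playback,
// with separate buttons to drive the recording (ids are illustrative).
document.body.insertAdjacentHTML('beforeend', `
  <button id="record">Record</button>
  <button id="stop">Stop</button>
  <audio id="playback" controls></audio>
`);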

This one has been a bit of a nightmare, so I thought I would post the full script here.

I started with the well-known recorder.js but ran into problems adapting it to my needs, so I ended up doing a fairly large refactor to make the code easier to follow. Unlike recorder.js, this code does not use a worker thread (which I really don't need).

function _playback(){
    let context = new AudioContext();
    var dest = context.createMediaStreamDestination();

    let source = context.createBufferSource(); // creates a sound source
    source.buffer = _appendBuffer(context, this._audioBuffer);  

    source.connect(dest);

    let player = document.getElementById("playback");
    player.srcObject = dest.stream;
    player.play();
}
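As an aside, an alternative playback path I have been considering is to skip the MediaStream plumbing entirely and just hand the browser the WAV blob that getWav() (below) produces, via an object URL. The function name here is mine and this is only a sketch of the idea, not something I have working:

function _playbackViaBlob(recorder)
{
    // Build a wav Blob from the recorded data and point the
    // playback element at an object URL for it.
    let blob = recorder.getWav();
    let url = URL.createObjectURL(blob);

    let player = document.getElementById("playback");
    player.src = url;

    // play() returns a promise in modern browsers; log a rejection
    // (e.g. an autoplay policy block) rather than failing silently.
    player.play().catch(e => console.error(e));
}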
/**
 * Allows recording via the device's microphone.
 */

class Recorder
{
    /**
     * Constraints setup for mono.
     * Currently there is no way to modify them.
     * It should be noted that these settings are ignored on most
     * systems and we get stereo at a 44K sampleRate regardless of these settings.
     */
    static  _constraints = {
              audio: 
                  {
                        channelCount: 1,
                        mimeType: 'audio/wav',
                        sampleRate: 8192,
                        sampleSize: 8,
                        autoGainControl: true,
                        noiseSuppression: true,
                        echoCancellation: true,
                  }
            };

    constructor(desiredChannels)
    {
        this._desiredChannels = desiredChannels;
        this._reset();
    }


    /*
     * Start recording.
     * 
     * errorCallback(e) - a function  that is called if the start fails.
     * 
     */
    start(errorCallback)
    {
        this._reset();
        this._context = new AudioContext();

        // request permission and if given
        // wire our audio control to the media stream.  
        navigator
            .mediaDevices
            .getUserMedia(Recorder._constraints)
            .then((stream) => this._wireRecordingStream(stream))
            .catch(e => errorCallback(e));

        // TODO: consider giving the user the ability to select an input device.
    }

    /*
     * Stops a currently active recording.
     */
    stop()
    {
        if (this._context != null)
        {
            this._context.close();
            this._context = null;
        }
    }

    /**
     * check if the user's phone supports media api
     */
    hasGetUserMedia() 
    {
          return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
    }

    /**
     * returns a Blob containing a wav file of the recording.  
     */
    getWav()
    {
        if (this._mergedChannelData == null)
            this._mergeChannels();

        let wav = new Wav(this._mergedChannelData, this._actualChannelCount, this._actualSampleRate);

        return wav.getBlob();
    }


    /**
     * resets the Recorder so we can restart the recording.
     */
    _reset()
    {
        this._channels = null;
        this._actualChannelCount = -1;

        // this will be updated when the recording starts to the actual rate.
        this._actualSampleRate = -1;

        // after _mergeChannels is called this will contain
        // a single float32 array of the underlying channel 
        // data interleaved to create a single audio stream.
        this._mergedChannelData = null;

    }


    _initChannelBuffers(actualChannels) 
    {
        if (this._channels == null)
        {
            this._channels = [];
            this._actualChannelCount = actualChannels;

            for (var i = 0; i < actualChannels; i++) 
            {
                this._channels.push(new Channel());
            }
        }
    }


    /**
     * The start() method uses this method to initialise the media stream
     * and wire up the 'onaudioprocess' handler to capture the recording.
     */
    _wireRecordingStream(stream)
    {
        // https://developers.google.com/web/fundamentals/media/recording-audio/

        // Setup recording.
        this._source = this._context.createMediaStreamSource(stream);

        this._node = (this._context.createScriptProcessor || this._context.createJavaScriptNode)
            .call(this._context, 4096, this._desiredChannels, this._desiredChannels); // 4K buffer and we prefer a single (mono) channel.

        // the context may have ignored our preferred sample rate.
        this._actualSampleRate = this._context.sampleRate;

        this._node.onaudioprocess = (e) => this._storeAudio(e.inputBuffer);

        this._source.connect(this._node);
        this._node.connect(this._context.destination);
    }

    /**
     * This is the callback for 'onaudioprocess' where we store the recorded data 
     * to each channel buffer.
     */
    _storeAudio(inputBuffer) 
    {
        this._initChannelBuffers(inputBuffer.numberOfChannels);

        for (var i = 0; i < this._actualChannelCount; i++) 
        {
            this._channels[i].storeAudioPacket(inputBuffer.getChannelData(i));
        }
    }

    // Merges all channels into a single float32Array.
    // Channels are merged by interleaving data packets from each channel into a single stream.
    _mergeChannels() 
    {
        if (this._actualChannelCount === 2) 
        {
            this._mergedChannelData = this._interleave(this._channels[0], this._channels[1]);
        } 
        else 
        {
            this._mergedChannelData = this._channels[0].getAudioData();
        }
    }

    /**
     * Interleaves two channel buffers into a single float32 array.
     */
    _interleave(lhsChannel, rhsChannel) 
    {
        let length = lhsChannel.getLength() + rhsChannel.getLength();
        let result = new Float32Array(length);

        let index = 0;
        let inputIndex = 0;

        let lhsData = lhsChannel.getAudioData();
        let rhsData = rhsChannel.getAudioData();

        while (index < length) 
        {
            result[index++] = lhsData[inputIndex];
            result[index++] = rhsData[inputIndex];
            inputIndex++;
        }
        return result;
    }
}
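For context, this is roughly how I drive the Recorder from the UI. The button ids and wiring here are illustrative placeholders rather than my exact page:

let recorder = new Recorder(1); // ask for mono, though most systems hand back stereo anyway

document.getElementById("record").onclick = () =>
{
    if (!recorder.hasGetUserMedia())
    {
        console.error("getUserMedia is not supported on this device");
        return;
    }

    recorder.start(e => console.error("recording failed to start", e));
};

document.getElementById("stop").onclick = () =>
{
    recorder.stop();

    // getWav() merges the channel buffers and wraps them in a wav header.
    let blob = recorder.getWav();
    console.log("recorded wav is " + blob.size + " bytes");
};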

/**
 * Used to buffer audio data for a single channel.
 */
class Channel
{
    constructor()
    {
        /** 
         * the total number of Float32s stored in all of the audio packets.
         */
        this._length = 0;

        // an array of audio packets (Float32Array) captured as the recording progresses.
        // 
        this._audioPackets = [];

        // If flatten has been called this will be a Float32Array
        // containing all of the combined audio packets as a single array.
        this._flattened = null;
    }

    getLength()
    {
        return this._length;
    }

    /**
     * returns a single audio packet stored at the given index.
     */
    getAudioPacket(index)
    {
        return this._audioPackets[index];
    }

    /**
     * returns the entire underlying data (Float32s) as a single Float32 array
     * If it hasn't already been done this method will call flatten to
     * combine all of the packets into a single data array.
     */
    getAudioData()
    {
        if (this._flattened == null)
            this._flatten();

        return this._flattened;
    }

    // Stores an audioPacket (Float32Array) to _audioPackets
    storeAudioPacket(audioPacket)
    {
        this._audioPackets.push(new Float32Array(audioPacket));
        this._length += audioPacket.length;
    }

    /**
     * coalesce all of the _audioPackets into a single float32Array
     */
    _flatten() 
    {
        this._flattened = new Float32Array(this._length);
        let  offset = 0;
        for (let i = 0; i < this._audioPackets.length; i++) 
        {
            this._flattened.set(this._audioPackets[i], offset);
            offset += this._audioPackets[i].length;
        }
    }
}
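Just to illustrate what Channel is doing with the packets (the values here are arbitrary):

let channel = new Channel();

// Each 'onaudioprocess' callback hands us one packet per channel.
channel.storeAudioPacket(Float32Array.of(0.1, 0.2));
channel.storeAudioPacket(Float32Array.of(0.3, 0.4));

console.log(channel.getLength());    // 4 samples in total
console.log(channel.getAudioData()); // one Float32Array with both packets coalesced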

/**
 * The logic for creating a wav file (well just the data structure actually) from
 * a stream of audioData
 * 
 * audioData - Float32Array containing the interleaved data from all channels.
 * channelCount - the number of channels interleaved into the audioData
 * sampleRate - the sampleRate of the audioData.
 */
class Wav
{
    /**
     * expects a single float32array from which it will create a wav file.
     */
    constructor(audioData, channelCount, sampleRate)
    {
        this._audioData = audioData;
        this._channelCount = channelCount;
        this._sampleRate = sampleRate;
    }

    /**
     * returns the wav file as a blob.
     */
    getBlob()
    {
        let wav = this._encodeAsWAV();
        let audioBlob = new Blob([wav], { type: "audio/wav" });

        return audioBlob;
    }

    /**
     * Encodes _audioData into a wav file by adding the 
     * standard wav header.
     */
    _encodeAsWAV() 
    {
        let audioData = this._audioData;

        var wavBuffer = new ArrayBuffer(44 + audioData.length * 2);
        var view = new DataView(wavBuffer);

        /* RIFF identifier */
        this._writeString(view, 0, 'RIFF');
        /* RIFF chunk length */
        view.setUint32(4, 36 + audioData.length * 2, true);
        /* RIFF type */
        this._writeString(view, 8, 'WAVE');
        /* format chunk identifier */
        this._writeString(view, 12, 'fmt ');
        /* format chunk length */
        view.setUint32(16, 16, true);
        /* sample format (raw) */
        view.setUint16(20, 1, true);
        /* channel count */
        view.setUint16(22, this._channelCount, true);
        /* sample rate */
        view.setUint32(24, this._sampleRate, true);
        /* byte rate (sample rate * block align) */
        view.setUint32(28, this._sampleRate * this._channelCount * 2, true);
        /* block align (channel count * bytes per sample) */
        view.setUint16(32, this._channelCount * 2, true);
        /* bits per sample */
        view.setUint16(34, 16, true);
        /* data chunk identifier */
        this._writeString(view, 36, 'data');
        /* data chunk length */
        view.setUint32(40, audioData.length * 2, true);

        this._floatTo16BitPCM(view, 44, audioData);

        return view;
    }

    _floatTo16BitPCM(output, offset, input) 
    {
        for (var i = 0; i < input.length; i++, offset += 2) 
        {
            var s = Math.max(-1, Math.min(1, input[i]));
            output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
        }
    }

    _writeString(view, offset, string) 
    {
        for (var i = 0; i < string.length; i++) 
        {
            view.setUint8(offset + i, string.charCodeAt(i));
        }
    }
}
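
Finally, a quick sanity check of the header the Wav class produces could look like this (the audio data and rates here are just example values):

let wav = new Wav(new Float32Array(8192), 1, 44100);

wav.getBlob().arrayBuffer().then(buffer =>
{
    let view = new DataView(buffer);

    // The first four bytes should spell out the RIFF identifier.
    console.log(String.fromCharCode(view.getUint8(0), view.getUint8(1), view.getUint8(2), view.getUint8(3))); // "RIFF"
    console.log(view.getUint16(22, true)); // channel count: 1
    console.log(view.getUint32(24, true)); // sample rate: 44100
    console.log(view.getUint16(34, true)); // bits per sample: 16
});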