JavaScript: recording audio with HTML5 and processing it with ffmpeg


I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user choose the format in which to save the recorded audio. I know nothing about ffmpeg; I have done a lot of research but still cannot figure out how to use it in my project. I found an example, but the problem is how to add ffmpeg.js, which is an 8 MB file, to my project. I would be very grateful if someone could help me. Here is my full code:

JavaScript page:

// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;



// legacy, vendor-prefixed callback API for microphone access
if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
                             navigator.webkitGetUserMedia ||
                             navigator.mozGetUserMedia ||
                             navigator.msGetUserMedia;

if (navigator.getUserMedia){
    navigator.getUserMedia({audio:true}, success, function(e) {
        alert('Error capturing audio.');
    });
} else {
    alert('getUserMedia not supported in this browser.');
}
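// Note: current browsers also expose a promise-based version of this API
// via navigator.mediaDevices.getUserMedia. A minimal equivalent of the
// block above (not part of the original code) would be:
//
//   navigator.mediaDevices.getUserMedia({ audio: true })
//       .then(success)
//       .catch(function (e) { alert('Error capturing audio.'); });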



function getVal(value) {

// if the record button is clicked, we start recording
if (value == "record"){
    recording = true;
    // reset the buffers for the new recording
    leftchannel.length = rightchannel.length = 0;
    recordingLength = 0;
    document.getElementById('output').innerHTML = "Recording now...";

// if the stop button is clicked, we stop the recording and package the WAV file
} else if (value == "stop"){

    // we stop recording
    recording = false;
    document.getElementById('output').innerHTML="Building wav file...";

    // we flat the left and right channels down
    var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
    var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
    // we interleave both channels together
    var interleaved = interleave ( leftBuffer, rightBuffer );



    var buffer = new ArrayBuffer(44 + interleaved.length * 2);
    var view = new DataView(buffer);

    // RIFF chunk descriptor
    writeUTFBytes(view, 0, 'RIFF');
    view.setUint32(4, 44 + interleaved.length * 2, true);
    writeUTFBytes(view, 8, 'WAVE');
    // FMT sub-chunk
    writeUTFBytes(view, 12, 'fmt ');
    view.setUint32(16, 16, true);
    view.setUint16(20, 1, true);
    // stereo (2 channels)
    view.setUint16(22, 2, true);
    // sample rate
    view.setUint32(24, sampleRate, true);
    // byte rate = sampleRate * blockAlign
    view.setUint32(28, sampleRate * 4, true);
    // block align = 2 channels * 2 bytes per sample
    view.setUint16(32, 4, true);
    // bits per sample
    view.setUint16(34, 16, true);
    // data sub-chunk
    writeUTFBytes(view, 36, 'data');
    view.setUint32(40, interleaved.length * 2, true);


    // write the PCM samples, scaled from [-1, 1] floats to 16-bit signed ints
    var lng = interleaved.length;
    var index = 44;
    var volume = 1;
    for (var i = 0; i < lng; i++){
        view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
        index += 2;
    }

    var blob = new Blob ( [ view ], { type : 'audio/wav' } );

    // let's save it locally

    document.getElementById('output').innerHTML='Handing off the file now...';
    var url = (window.URL || window.webkitURL).createObjectURL(blob);

    var li = document.createElement('li');
    var au = document.createElement('audio');
    var hf = document.createElement('a');

    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
    hf.innerHTML = hf.download;
    li.appendChild(au);
    li.appendChild(hf);
    recordingList.appendChild(li);

}
}


function success(e){

audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();


volume = context.createGain();

// create an audio node from the microphone's incoming stream
var source = context.createMediaStreamSource(e);

// connect the stream(source) to the gain node
source.connect(volume);

var bufferSize = 2048;

recorder = context.createScriptProcessor(bufferSize, 2, 2);

// node for the visualizer
var analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;

var splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function(e){

    if (!recording) return;
    var left = e.inputBuffer.getChannelData (0);
    var right = e.inputBuffer.getChannelData (1);

    leftchannel.push (new Float32Array (left));
    rightchannel.push (new Float32Array (right));
    recordingLength += bufferSize;

    // get the average for the first channel
    var array =  new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(array);

    var c=document.getElementById("myCanvas");
    var ctx = c.getContext("2d");
    // clear the current state
    ctx.clearRect(0, 0, 1000, 325);
    var gradient = ctx.createLinearGradient(0,0,0,300);
    gradient.addColorStop(1,'#000000');
    gradient.addColorStop(0.75,'#ff0000');
    gradient.addColorStop(0.25,'#ffff00');
    gradient.addColorStop(0,'#ffffff');
    // set the fill style
    ctx.fillStyle=gradient;
    drawSpectrum(array);
    function drawSpectrum(array) {
        for ( var i = 0; i < (array.length); i++ ){
                var value = array[i];
                ctx.fillRect(i*5,325-value,3,325);
            } 

    }
}

function getAverageVolume(array) {
    var values = 0;
    var average;

    var length = array.length;

    // get all the frequency amplitudes
    for (var i = 0; i < length; i++) {
        values += array[i];
    }

    average = values / length;
    return average;
}

    // wire the graph: gain -> splitter -> analyser -> recorder -> destination (speakers)
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);

    analyser.connect(recorder);
    recorder.connect(context.destination);

}




function mergeBuffers(channelBuffer, recordingLength){
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++){
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
    return result;
   }

function interleave(leftChannel, rightChannel){
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);

var inputIndex = 0;

for (var index = 0; index < length; ){
 result[index++] = leftChannel[inputIndex];
 result[index++] = rightChannel[inputIndex];
 inputIndex++;
}
return result;
}


function writeUTFBytes(view, offset, string){ 
var lng = string.length;
for (var i = 0; i < lng; i++){

view.setUint8(offset + i, string.charCodeAt(i));
}
}
HTML page:

<!DOCTYPE html>
<html>

<head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
    <title>Simple Web Audio Recorder</title>
    <script src="js/functions.js"></script>
   <link href="css/style.css" rel="stylesheet" type="text/css" />
</head>
 <body>
    <input type="button" value="record" onclick="getVal(this.value)">
    <input type="button" value="stop" onclick="getVal(this.value)">
    <p id="output"></p>
<ul id="recordingList"></ul>
<canvas id="myCanvas" width="1000" height="325" style="display: block;"></canvas>

</body>
</html>
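To do the conversion in the browser itself, one option is FFmpeg compiled to WebAssembly. Below is a minimal sketch, assuming the @ffmpeg/ffmpeg 0.11.x API (createFFmpeg / fetchFile); the function name convertRecording and the file names are placeholders, not part of the original code:

import { createFFmpeg, fetchFile } from '@ffmpeg/ffmpeg';

const ffmpeg = createFFmpeg({ log: true });

// convert the recorded WAV blob to the format the user picked, e.g. 'mp3' or 'ogg'
async function convertRecording(wavBlob, format) {
    if (!ffmpeg.isLoaded()) await ffmpeg.load(); // fetches the wasm core once
    ffmpeg.FS('writeFile', 'in.wav', await fetchFile(wavBlob));
    await ffmpeg.run('-i', 'in.wav', 'out.' + format);
    const data = ffmpeg.FS('readFile', 'out.' + format);
    return new Blob([data.buffer], { type: 'audio/' + format });
}

The returned blob can be handed to createObjectURL exactly like the WAV blob above. Alternatively, the conversion can be done server-side by shelling out to the ffmpeg binary, as in this Python helper: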
import subprocess

FFMPEG = 'ffmpeg'  # path to the ffmpeg binary

def create_prelisten_aac(mp3, aac):
    """
    Encode a single file.

    Produce 48 kbit/s mono files for prelistening.
    """
    # note: libfaac has been dropped from recent FFmpeg builds; with a
    # current build, the built-in 'aac' encoder can be used instead
    cmdline = [FFMPEG, '-y', '-i', mp3, '-acodec', 'libfaac', '-ar', '22050',
               '-ac', '1', '-ab', '48000', aac]
    return subprocess.call(cmdline)
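For example (the file names are placeholders; ffmpeg must be on the PATH):

rc = create_prelisten_aac('song.mp3', 'song_prelisten.aac')
assert rc == 0  # subprocess.call returns ffmpeg's exit code, 0 on success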