Web Audio API 录音功能
我有以下来自网上的代码,它本身运行得很好。但我还需要一个录音功能:能够记录之前点击过的按钮,并把对应播放的音频也保存下来。请问有什么办法吗?(标签:api, web-audio-api)代码如下:
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Web Audio API Loops Demo</title>
</head>
<body>
<form>
<button id="button-loop-1" type="button" value="1">Loop 1</button>
<button id="button-loop-2" type="button" value="2">Loop 2</button>
</form>
<script>
//--------------
// Audio Object
//--------------
// Central state container for the demo: decoded buffers, feature-detection
// results, the source file list, and the currently active source nodes.
var audio = {};
audio.buffer = {};          // decoded AudioBuffers, keyed by loop number (1-based)
audio.compatibility = {};   // resolved method names: start/stop vs. legacy noteOn/noteOff
audio.files = [
    'synth.wav',
    'beat.wav'
];
audio.proceed = true;       // flipped to false when the Web Audio API is unavailable
audio.source_loop = {};     // looping AudioBufferSourceNodes, keyed by loop number
audio.source_once = {};     // one-shot nodes used by the legacy noteOn offset workaround
//-----------------
// Audio Functions
//-----------------
/**
 * Compute the playback offset (in seconds) needed to keep loop `n` in sync
 * with whichever source started playing earliest.
 * @param {string|number} n - loop number used as key into audio.buffer.
 * @returns {number} offset into the buffer, 0 if nothing constrains it.
 */
audio.findSync = function(n) {
    var earliest = 0;

    // Find the audio source with the earliest startTime to sync all others to.
    for (var key in audio.source_loop) {
        var started = audio.source_loop[key]._startTime;
        if (started > 0 && (earliest === 0 || started < earliest)) {
            earliest = started;
        }
    }

    // Nothing has elapsed past the earliest start: begin at the top.
    if (audio.context.currentTime <= earliest) {
        return 0;
    }
    return (audio.context.currentTime - earliest) % audio.buffer[n].duration;
};
/**
 * Toggle playback of loop `n`: stop it if playing, otherwise start it in
 * sync with any already-running loops.
 * @param {string|number} n - loop number (matches the clicked button's value).
 */
audio.play = function(n) {
    // Second click on a playing loop stops it.
    if (audio.source_loop[n]._playing) {
        audio.stop(n);
        return;
    }

    var loopNode = audio.context.createBufferSource();
    loopNode.buffer = audio.buffer[n];
    loopNode.loop = true;
    loopNode.connect(audio.context.destination);
    audio.source_loop[n] = loopNode;

    var offset = audio.findSync(n);
    loopNode._startTime = audio.context.currentTime;

    if (audio.compatibility.start === 'noteOn') {
        /*
            The deprecated noteOn() function does not support offsets.
            Compensate by using noteGrainOn() with an offset to play once,
            then schedule a noteOn() call to begin looping after that.
        */
        var onceNode = audio.context.createBufferSource();
        onceNode.buffer = audio.buffer[n];
        onceNode.connect(audio.context.destination);
        audio.source_once[n] = onceNode;
        onceNode.noteGrainOn(0, offset, audio.buffer[n].duration - offset); // currentTime, offset, duration
        /*
            Note about the third parameter of noteGrainOn():
            if your sound is 10 seconds long, offset 5 and duration 5, you get what you expect;
            if your sound is 10 seconds long, offset 5 and duration 10, it plays from the start instead of the offset.
        */
        // Queue the looping sound to start immediately after the one-shot finishes.
        loopNode[audio.compatibility.start](audio.context.currentTime + (audio.buffer[n].duration - offset));
    } else {
        loopNode[audio.compatibility.start](0, offset);
    }

    loopNode._playing = true;
};
/**
 * Stop loop `n` if it is currently playing and reset its bookkeeping.
 * @param {string|number} n - loop number.
 */
audio.stop = function(n) {
    var loopNode = audio.source_loop[n];

    if (!loopNode._playing) {
        return;
    }

    loopNode[audio.compatibility.stop](0);
    loopNode._playing = false;
    loopNode._startTime = 0;

    // The legacy noteOn path also started a one-shot source; silence it too.
    if (audio.compatibility.start === 'noteOn') {
        audio.source_once[n][audio.compatibility.stop](0);
    }
};
//-----------------------------
// Check Web Audio API Support
//-----------------------------
try {
    // More info at http://caniuse.com/#feat=audio-api
    // Older WebKit browsers only expose the prefixed constructor.
    var AudioContextCtor = window.AudioContext || window.webkitAudioContext;
    window.AudioContext = AudioContextCtor;
    audio.context = new AudioContextCtor();
} catch(e) {
    audio.proceed = false;
    alert('Web Audio API not supported in this browser.');
}
if (audio.proceed) {
    //---------------
    // Compatibility
    //---------------
    // Feature-detect the modern start()/stop() methods on a throwaway source
    // node; fall back to the deprecated noteOn()/noteOff() names otherwise.
    (function() {
        var start = 'start',
            stop = 'stop',
            buffer = audio.context.createBufferSource();
        if (typeof buffer.start !== 'function') {
            start = 'noteOn';
        }
        audio.compatibility.start = start;
        if (typeof buffer.stop !== 'function') {
            stop = 'noteOff';
        }
        audio.compatibility.stop = stop;
    })();
    //-------------------------------
    // Setup Audio Files and Buttons
    //-------------------------------
    // forEach replaces the original for...in loop over the array (for...in
    // also visits inherited/added keys and yields string indices, which
    // forced a radix-less parseInt and a wrapping IIFE for scoping).
    audio.files.forEach(function(file, index) {
        var i = index + 1; // buttons and buffers are numbered from 1
        var req = new XMLHttpRequest();
        req.open('GET', file, true);
        req.responseType = 'arraybuffer';
        req.onload = function() {
            audio.context.decodeAudioData(
                req.response,
                function(buffer) {
                    audio.buffer[i] = buffer;
                    audio.source_loop[i] = {};
                    var button = document.getElementById('button-loop-' + i);
                    button.addEventListener('click', function(e) {
                        e.preventDefault();
                        // The button's value ('1'/'2') doubles as the loop key.
                        audio.play(this.value);
                    });
                },
                function() {
                    console.log('Error decoding audio "' + file + '".');
                }
            );
        };
        req.send();
    });
}
</script>
</body>
</html>
(以下为页面渲染效果与代码的重复展示)
Web Audio API Loops Demo
Loop 1
Loop 2
//--------------
// Audio Object
//--------------
var audio = {
    buffer: {},
    compatibility: {},
    files: [
        'synth.wav',
        'beat.wav'
    ],
    proceed: true,
    source_loop: {},
    source_once: {}
};
//-----------------
// Audio Functions
//-----------------
audio.findSync = function(n) {
    var first = 0,
        current = 0,
        offset = 0;
    // Find the audio source with the earliest startTime to sync all others to
    for (var i in audio.source_loop) {
        current = audio.source_loop[i]._startTime;
        if (current > 0) {
            if (current < first || first === 0) {
                first = current;
            }
        }
    }
    if (audio.context.currentTime > first) {
        offset = (audio.context.currentTime - first) % audio.buffer[n].duration;
    }
    return offset;
};
audio.play = function(n) {
    if (audio.source_loop[n]._playing) {
        audio.stop(n);
    } else {
        audio.source_loop[n] = audio.context.createBufferSource();
        audio.source_loop[n].buffer = audio.buffer[n];
        audio.source_loop[n].loop = true;
        audio.source_loop[n].connect(audio.context.destination);
        var offset = audio.findSync(n);
        audio.source_loop[n]._startTime = audio.context.currentTime;
        if (audio.compatibility.start === 'noteOn') {
            /*
                The deprecated noteOn() function does not support offsets.
                Compensate by using noteGrainOn() with an offset to play once and then schedule a noteOn() call to loop after that.
            */
            audio.source_once[n] = audio.context.createBufferSource();
            audio.source_once[n].buffer = audio.buffer[n];
            audio.source_once[n].connect(audio.context.destination);
            audio.source_once[n].noteGrainOn(0, offset, audio.buffer[n].duration - offset); // currentTime, offset, duration
            /*
                Note about the third parameter of noteGrainOn().
                If your sound is 10 seconds long, your offset 5 and duration 5 then you'll get what you expect.
                If your sound is 10 seconds long, your offset 5 and duration 10 then the sound will play from the start instead of the offset.
            */
            // Now queue up our looping sound to start immediately after the source_once audio plays.
            audio.source_loop[n][audio.compatibility.start](audio.context.currentTime + (audio.buffer[n].duration - offset));
        } else {
            audio.source_loop[n][audio.compatibility.start](0, offset);
        }
        audio.source_loop[n]._playing = true;
    }
};
audio.stop = function(n) {
    if (audio.source_loop[n]._playing) {
        audio.source_loop[n][audio.compatibility.stop](0);
        audio.source_loop[n]._playing = false;
        audio.source_loop[n]._startTime = 0;
        if (audio.compatibility.start === 'noteOn') {
            audio.source_once[n][audio.compatibility.stop](0);
        }
    }
};
//-----------------------------
// Check Web Audio API Support
//-----------------------------
try {
    // More info at http://caniuse.com/#feat=audio-api
    window.AudioContext = window.AudioContext || window.webkitAudioContext;
    audio.context = new window.AudioContext();
} catch(e) {
    audio.proceed = false;
    alert('Web Audio API not supported in this browser.');
}
if (audio.proceed) {
    //---------------
    // Compatibility
    //---------------
    (function() {
        var start = 'start',
            stop = 'stop',
            buffer = audio.context.createBufferSource();
        if (typeof buffer.start !== 'function') {
            start = 'noteOn';
        }
        audio.compatibility.start = start;
        if (typeof buffer.stop !== 'function') {
            stop = 'noteOff';
        }
        audio.compatibility.stop = stop;
    })();
    //-------------------------------
    // Setup Audio Files and Buttons
    //-------------------------------
    for (var a in audio.files) {
        (function() {
            var i = parseInt(a) + 1;
            var req = new XMLHttpRequest();
            req.open('GET', audio.files[i - 1], true); // array starts with 0 hence the -1
            req.responseType = 'arraybuffer';
            req.onload = function() {
                audio.context.decodeAudioData(
                    req.response,
                    function(buffer) {
                        audio.buffer[i] = buffer;
                        audio.source_loop[i] = {};
                        var button = document.getElementById('button-loop-' + i);
                        button.addEventListener('click', function(e) {
                            e.preventDefault();
                            audio.play(this.value);
                        });
                    },
                    function() {
                        console.log('Error decoding audio "' + audio.files[i - 1] + '".');
                    }
                );
            };
            req.send();
        })();
    }
}
可以看看 Recorder.js,它应该能满足你的录音需求;也可以参考我把 Recorder.js 集成到录音机演示中的示例(原回答附有相关链接,此处已丢失)。