JavaScript Web Audio API: how to convert from the time domain using FFT and convert the data back using iFFT
I have been trying to convert audio data to frequency-domain data, edit that data, and reconstruct the audio from it. I followed these instructions to get a buffer to run the analysis on, to do the analysis, and to reconstruct the wave.
The audio rendered by the OfflineAudioContext should match the audio of the PeriodicWave, but it clearly doesn't. The instructions say it should, though, so I am obviously missing something.
(Also, I don't know how to use the real- and imaginary-value inputs of PeriodicWave. According to the instructions, the real values are the sine and the imaginary values are the cosine, so I set all the imaginary values to 0, since I have no cosine values from the AnalyserNode's FFT analysis, and there doesn't seem to be another way.)
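For reference, here is a minimal sketch of how createPeriodicWave is called per the Web Audio API spec, where the real array holds the cosine terms and the imag array the sine terms; the coefficient values are made up for illustration:

var ctx = new (window.AudioContext || window.webkitAudioContext)();
// Index 0 (the DC term) is ignored; index n scales the n-th harmonic of the oscillator frequency.
var real = new Float32Array([0, 1, 0.5]); // fundamental plus a half-amplitude 2nd harmonic
var imag = new Float32Array(real.length); // all zeros: no sine (90-degree-shifted) components
var wave = ctx.createPeriodicWave(real, imag, {disableNormalization: true});
var osc = ctx.createOscillator();
osc.setPeriodicWave(wave);
osc.frequency.value = 220; // harmonics land at 220 Hz and 440 Hz
osc.connect(ctx.destination);
osc.start();
osc.stop(ctx.currentTime + 1);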
The simplest and closest script I have gotten so far is the following ():
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width">
<title>Audio Test</title>
<link rel="stylesheet" href="">
<!--[if lt IE 9]>
<script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
</head>
<body>
<h1>Audio Test</h1>
<button id='0'>Play original sound</button>
<button id='1'>Play reconstructed sound</button>
<pre></pre>
</body>
<script id='script'>
var pre = document.querySelector('pre');
var myScript = document.getElementById('script');
pre.innerHTML = myScript.innerHTML;
var buttonOriginal = document.getElementById('0');
var buttonReconstr = document.getElementById('1');
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var channels = 2;
var sampleRate = audioCtx.sampleRate;
var frameCount = sampleRate * 2.0;
var offlineCtx = new OfflineAudioContext(channels, frameCount, sampleRate);
var myArrayBuffer = offlineCtx.createBuffer(channels, frameCount, sampleRate);
var offlineSource = offlineCtx.createBufferSource();
var analyser = offlineCtx.createAnalyser();
var pi = Math.PI;
var songPos = [0, 0];
for (var channel = 0; channel < channels; channel++) {
var nowBuffering = myArrayBuffer.getChannelData(channel);
for (var i = 0; i < frameCount; i++) {
songPos[channel]++;
nowBuffering[i] = synth(channel);
}
}
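// Wire the offline graph: buffer source -> analyser -> destination, then start
// the source so the whole buffer gets rendered through the analyser.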
analyser.connect(offlineCtx.destination);
offlineSource.connect(analyser);
offlineSource.buffer = myArrayBuffer;
offlineSource.start();
offlineCtx.startRendering().then(function (renderedBuffer) {
console.log('Rendering completed successfully');
analyser.fftSize = 2048;
var bufferLength = analyser.frequencyBinCount;
var dataArray = new Float32Array(bufferLength);
analyser.getFloatFrequencyData(dataArray);
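// Note: getFloatFrequencyData fills dataArray with magnitudes in dB (negative
// values down to -Infinity), not linear amplitudes, and carries no phase information.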
console.log(dataArray);
// Remove -infinity
for (var i = 0; i < dataArray.length; i++) {
if(dataArray[i]==-Infinity)
dataArray[i] = -255;
}
/// Reconstruct
// Create array of zeros
var imagArray = new Float32Array(bufferLength);
for (var i = 0; i < imagArray.length; i++) imagArray[i] = 0;
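// (A new Float32Array is already zero-filled, so this loop is redundant.)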
var wave = audioCtx.createPeriodicWave(dataArray, imagArray, {disableNormalization: true});
console.log(wave);
buttonReconstr.onclick = function() {
var wave = audioCtx.createPeriodicWave(dataArray, imagArray, {disableNormalization: true});
var osc = audioCtx.createOscillator();
osc.setPeriodicWave(wave);
osc.connect(audioCtx.destination);
osc.start();
osc.stop(2);
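// stop(2) schedules the stop at 2 s of AudioContext time (an absolute time), not 2 s after start().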
osc.onended = () => {
console.log('Reconstructed sound finished');
}
}
buttonOriginal.onclick = function() {
var song = audioCtx.createBufferSource();
song.buffer = renderedBuffer;
song.connect(audioCtx.destination);
song.start();
song.onended = () => {
console.log('Original sound finished');
}
}
})/*.catch(function (err) {
console.log('Rendering failed: ' + err);
// Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
});*/
function freqSin(freq, time) {
return Math.sin(freq * (2 * pi) * time);
}
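// synth() generates one test-tone sample per call: channel 0 is a ~200 Hz sine
// with a 9 Hz, +/-10 Hz vibrato; channel 1 is a ~900 Hz sine with a 10 Hz vibrato.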
function synth(channel) {
var time = songPos[channel] / sampleRate;
switch (channel) {
case 0:
var freq = 200 + 10 * freqSin(9, time);
var amp = 0.7;
var output = amp * Math.sin(freq * (2 * pi) * time);
break;
case 1:
var freq = 900 + 10 * freqSin(10, time);
var amp = 0.7;
var output = amp * Math.sin(freq * (2 * pi) * time);
break;
}
//console.log(output)
return output;
}
</script>
</html>

An interesting side effect of this script is that after you play the original sound, you can no longer play the reconstructed sound (although you can replay the original as often as you like). To play the reconstructed sound you have to play it first, and after that it will only play again after a refresh. (It can also play after the original if you start it while the original is still playing.)

To make this work, you need both the real and imaginary parts of the FFT of the time-domain signal. The AnalyserNode only provides the magnitude; the phase component is missing. Sorry, this won't work.
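A minimal sketch of what this implies, assuming you roll your own transform (the dft helper below is illustrative, not part of the Web Audio API): capture the time-domain samples with getFloatTimeDomainData and compute a complex DFT, so that both the real and imaginary parts, and therefore the phase, are preserved:

function dft(samples) {
  // Naive O(N^2) forward DFT that keeps both components; fine for a
  // 2048-sample demo, but use a proper FFT library for anything bigger.
  var N = samples.length;
  var real = new Float32Array(N);
  var imag = new Float32Array(N);
  for (var k = 0; k < N; k++) {
    for (var n = 0; n < N; n++) {
      var angle = (2 * Math.PI * k * n) / N;
      real[k] += samples[n] * Math.cos(angle);
      imag[k] -= samples[n] * Math.sin(angle);
    }
  }
  return { real: real, imag: imag }; // magnitude and phase are both recoverable
}

// Usage sketch, e.g. inside the rendering callback above:
var timeData = new Float32Array(analyser.fftSize);
analyser.getFloatTimeDomainData(timeData); // raw samples, not dB magnitudes
var bins = dft(timeData);
// ...edit bins.real / bins.imag here, then run an inverse DFT, or hand them to
// createPeriodicWave, which treats real as cosine terms and imag as sine terms.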