Audio 关于NotchFilter,过滤器未完全关闭

标签：audio, filter

我试着用陷波滤波器（NotchFilter）滤除特定频率，但滤除是不完整的。我使用的库是 ddf.minim.effects.NotchFilter。我试图滤除 3000–7000 赫兹的部分；我把过滤后的音频保存下来，用软件分析后发现 3000–7000 赫兹并没有被完全滤除（只衰减了一小部分）。我的代码见下文。

我试着用音乐滤波器滤除特定的赫兹,但滤除是不完整的。 我使用的库是:(ddf.minim.effects.NotchFilter)

我试着过滤掉3000-7000赫兹的部分,我保存过滤后的音频,用软件分析音频,发现3000-7000赫兹没有被完全过滤掉(只有一小部分滤波器)

我的代码:

final int minBufferSize = AudioTrack.getMinBufferSize(44100,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);

    // Streaming playback: 44.1 kHz, stereo, 16-bit PCM, using the minimum
    // buffer size the platform reports for this format.
    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize, AudioTrack.MODE_STREAM);

// Minim NotchFilter(centerFrequency, bandwidth, sampleRate):
// center 5000 Hz, bandwidth 4000 Hz (roughly the 3000-7000 Hz band) at 44.1 kHz.
// NOTE(review): this looks like a low-order IIR notch, which attenuates
// gradually around the center frequency instead of removing the whole band --
// presumably why analysis still shows energy in 3000-7000 Hz. Confirm against
// the Minim docs; a steep band-stop would need a much higher-order filter or
// FFT-domain processing.
notchLeft = new NotchFilter(5000, 4000,44100);
notchRight = new NotchFilter(5000, 4000,44100);

float[][] signal = deinterleaveData(shortToFloat(buffer), 2);
if (isFilterLeft) {
    notchLeft.process(signal[0]);
}
if (isFilterRight) {
    notchRight.process(signal[1]);
}
float[] newBuffer = interleaveData(signal);
if (buffer.length < newBuffer.length)
    buffer = new short[newBuffer.length];
for (int i = 0; i < newBuffer.length; i++) // （原文在此处被截断）

private float[] shortToFloat(short[] audio) {
    // Normalize 16-bit PCM samples from [-32768, 32767] to floats in [-1, 1).
    // 1/32768 is an exact power of two, so multiplying is bit-identical to
    // dividing by 32768f.
    final float scale = 1f / 32768f;
    float[] normalized = new float[audio.length];
    for (int i = 0; i < audio.length; i++) {
        normalized[i] = audio[i] * scale;
    }
    return normalized;
}

    /**
     * Splits an interleaved sample buffer (frame-major: ch0, ch1, ch0, ch1, ...)
     * into one contiguous array per channel. A trailing partial frame, if any,
     * is silently dropped by the integer division.
     */
    float[][] deinterleaveData(float[] samples, int numChannels) {
        int frames = samples.length / numChannels;
        float[][] channels = new float[numChannels][frames];
        int src = 0;
        for (int frame = 0; frame < frames; frame++) {
            for (int ch = 0; ch < numChannels; ch++) {
                channels[ch][frame] = samples[src++];
            }
        }
        return channels;
    }

    /**
     * Merges per-channel arrays back into a single interleaved buffer
     * (frame-major: ch0, ch1, ch0, ch1, ...).
     * Assumes every channel holds data[0].length frames.
     */
    float[] interleaveData(float[][] data) {
        int channels = data.length;
        int frames = data[0].length;
        float[] interleaved = new float[frames * channels];
        int dst = 0;
        for (int frame = 0; frame < frames; frame++) {
            for (int ch = 0; ch < channels; ch++) {
                interleaved[dst++] = data[ch][frame];
            }
        }
        return interleaved;
    }
private float[] shortToFloat(short[] audio) {
    float[] converted = new float[audio.length];
    for (int i = 0; i < converted.length; i++) {
        // [-32768, 32767] -> [-1, 1]
        converted[i] = audio[i] / 32768f;
    }
    return converted;
}

float[][] deinterleaveData(float[] samples, int numChannels) {
    // assert(samples.length % numChannels == 0);
    int numFrames = samples.length / numChannels;
    float[][] result = new float[numChannels][];
    for (int ch = 0; // （原文在此处被截断）
我把音频数据拆分成左右两个通道，分别过滤，然后再合并在一起。我不确定哪里出了问题——你知道问题出在哪里吗？

// Converts 16-bit PCM samples to normalized floats for filtering.
private float[] shortToFloat(short[] audio) {

        float[] converted = new float[audio.length];

        for (int i = 0; i < converted.length; i++) {
            // [-32768, 32767] -> [-1, 1)
            converted[i] = audio[i] / 32768f;
        }

        return converted;
    }

    // Splits an interleaved buffer (frame-major: ch0, ch1, ch0, ch1, ...) into
    // one contiguous array per channel. A trailing partial frame, if any, is
    // silently dropped by the integer division below.
    float[][] deinterleaveData(float[] samples, int numChannels) {
        // assert(samples.length() % numChannels == 0);
        int numFrames = samples.length / numChannels;

        float[][] result = new float[numChannels][];
        for (int ch = 0; ch < numChannels; ch++) {
            result[ch] = new float[numFrames];
            for (int i = 0; i < numFrames; i++) {
                result[ch][i] = samples[numChannels * i + ch];
            }
        }
        return result;
    }

    // Merges per-channel arrays back into a single interleaved buffer
    // (frame-major order). Assumes every channel has data[0].length frames.
    float[] interleaveData(float[][] data) {
        int numChannels = data.length;
        int numFrames = data[0].length;

        float[] result = new float[numFrames * numChannels];
        for (int i = 0; i < numFrames; i++) {
            for (int ch = 0; ch < numChannels; ch++) {
                result[numChannels * i + ch] = data[ch][i];
            }
        }
        return result;
    }