
iPhone: redirect audio output to the phone speaker while keeping microphone input on the headset

Tags: iphone, objective-c, ios, audio

Is it possible to redirect audio output to the phone speaker while still using the headset microphone for input?

If I redirect the audio route to the phone speaker instead of the headset, it also redirects the microphone. That makes sense, but I can't seem to redirect just the microphone input. Any ideas?

Here is the code I'm using to redirect audio to the speaker:

OSStatus propertySetError = 0;
UInt32 doChangeDefaultRoute = true;
propertySetError = AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker, sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
NSAssert(propertySetError == 0, @"Failed to set audio session property: OverrideCategoryDefaultToSpeaker");

UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);

I'm afraid this isn't possible.

From the documentation for kAudioSessionProperty_OverrideAudioRoute:

If a headset is plugged in at the time you set this property's value to kAudioSessionOverrideAudioRoute_Speaker, the system changes the audio routing for input as well as output: input comes from the built-in microphone; output goes to the built-in speaker.
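
To see this for yourself, you can read back the current route after applying the override; a short sketch using the same (now long-deprecated) C API as the question:

CFStringRef route = NULL;
UInt32 routeSize = sizeof(route);
AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &routeSize, &route);
// With a headset plugged in and the speaker override applied, this logs
// "SpeakerAndMicrophone" rather than a headset route
NSLog(@"Current route: %@", route);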


Possible duplicate.

This is possible, but it's finicky about how you set it up:

[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);
Using the AVAudioSessionCategoryPlayAndRecord session category is important; otherwise the route cannot be changed to the speaker. Once the override route is set on the audio session, you can use an AVAudioPlayer instance to send output to the speaker.
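
Here is a minimal sketch of that sequence. The "sound.caf" resource name and the error handling are placeholders, not part of the original answer:

#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

NSError *error = nil;

// PlayAndRecord is required; other categories ignore the speaker override
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];

UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
AudioSessionSetProperty(kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);

// "sound.caf" is a hypothetical audio file bundled with the app
NSURL *url = [[NSBundle mainBundle] URLForResource:@"sound" withExtension:@"caf"];
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithContentsOfURL:url error:&error];
[player prepareToPlay];
[player play]; // output now comes from the built-in speaker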

Hopefully this works for others the way it did for me. The documentation on this is scattered, but the Skype app proves it's possible. Persevere, my friends! :)

Here is some Apple documentation:


Search the page for kAudioSessionProperty_OverrideAudioRoute.

In any case, you can force audio output to the speaker:

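AudioRouter.h: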
@interface AudioRouter : NSObject

+ (void) initAudioSessionRouting;
+ (void) switchToDefaultHardware;
+ (void) forceOutputToBuiltInSpeakers;

// declared here as well so the C route-change callback can call them without warnings
+ (NSString*) getAudioSessionInput;
+ (NSString*) getAudioSessionOutput;
+ (NSString*) getAudioSessionRoute;

@end
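
AudioRouter.m: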
#import "AudioRouter.h"
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>

@implementation AudioRouter

#define IS_DEBUGGING NO
#define IS_DEBUGGING_EXTRA_INFO NO

+ (void) initAudioSessionRouting {

    // Called once to route all audio through speakers, even if something's plugged into the headphone jack
    static BOOL audioSessionSetup = NO;
    if (audioSessionSetup == NO) {

        // set category to accept properties assigned below
        NSError *sessionError = nil;
        [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker error: &sessionError];

        // Doubly force audio to come out of speaker
        UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
        AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);

        // fix issue with audio interrupting video recording - allow audio to mix on top of other media
        UInt32 doSetProperty = 1;
        AudioSessionSetProperty (kAudioSessionProperty_OverrideCategoryMixWithOthers, sizeof(doSetProperty), &doSetProperty);

        // set active
        [[AVAudioSession sharedInstance] setDelegate:self];
        [[AVAudioSession sharedInstance] setActive: YES error: nil];

        // add listener for audio input changes
        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioRouteChange, onAudioRouteChange, nil );
        AudioSessionAddPropertyListener (kAudioSessionProperty_AudioInputAvailable, onAudioRouteChange, nil );

    }

    // Force audio to come out of speaker
    [[AVAudioSession sharedInstance] overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:nil];

    // set flag
    audioSessionSetup = YES;
}

+ (void) switchToDefaultHardware {
    // Remove forcing to built-in speaker
    UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_None;
    AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);
}

+ (void) forceOutputToBuiltInSpeakers {
    // Re-force audio to come out of speaker
    UInt32 audioRouteOverride = kAudioSessionOverrideAudioRoute_Speaker;
    AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute, sizeof(audioRouteOverride), &audioRouteOverride);
}

void onAudioRouteChange (void* clientData, AudioSessionPropertyID inID, UInt32 dataSize, const void* inData) {

    if( IS_DEBUGGING == YES ) {
        NSLog(@"==== Audio Harware Status ====");
        NSLog(@"Current Input:  %@", [AudioRouter getAudioSessionInput]);
        NSLog(@"Current Output: %@", [AudioRouter getAudioSessionOutput]);
        NSLog(@"Current hardware route: %@", [AudioRouter getAudioSessionRoute]);
        NSLog(@"==============================");
    }

    if( IS_DEBUGGING_EXTRA_INFO == YES ) {
        NSLog(@"==== Audio Harware Status (EXTENDED) ====");
        CFDictionaryRef dict = (CFDictionaryRef)inData;
        CFNumberRef reason = CFDictionaryGetValue(dict, kAudioSession_RouteChangeKey_Reason);
        CFDictionaryRef oldRoute = CFDictionaryGetValue(dict, kAudioSession_AudioRouteChangeKey_PreviousRouteDescription);
        CFDictionaryRef newRoute = CFDictionaryGetValue(dict, kAudioSession_AudioRouteChangeKey_CurrentRouteDescription);
        NSLog(@"Audio old route: %@", oldRoute);
        NSLog(@"Audio new route: %@", newRoute);
        NSLog(@"=========================================");
    }
}

+ (NSString*) getAudioSessionInput {
    UInt32 routeSize;
    AudioSessionGetPropertySize(kAudioSessionProperty_AudioRouteDescription, &routeSize);
    CFDictionaryRef desc; // this is the dictionary to contain descriptions

    // make the call to get the audio description and populate the desc dictionary
    AudioSessionGetProperty (kAudioSessionProperty_AudioRouteDescription, &routeSize, &desc);

    // the dictionary contains 2 keys, for input and output. Get the input array
    CFArrayRef inputs = CFDictionaryGetValue(desc, kAudioSession_AudioRouteKey_Inputs);

    // the input array contains 1 element - a dictionary
    CFDictionaryRef diction = CFArrayGetValueAtIndex(inputs, 0);

    // get the input description from the dictionary
    CFStringRef input = CFDictionaryGetValue(diction, kAudioSession_AudioRouteKey_Type);
    return [NSString stringWithFormat:@"%@", input];
}

+ (NSString*) getAudioSessionOutput {
    UInt32 routeSize;
    AudioSessionGetPropertySize(kAudioSessionProperty_AudioRouteDescription, &routeSize);
    CFDictionaryRef desc; // this is the dictionary to contain descriptions

    // make the call to get the audio description and populate the desc dictionary
    AudioSessionGetProperty (kAudioSessionProperty_AudioRouteDescription, &routeSize, &desc);

    // the dictionary contains 2 keys, for input and output. Get output array
    CFArrayRef outputs = CFDictionaryGetValue(desc, kAudioSession_AudioRouteKey_Outputs);

    // the output array contains 1 element - a dictionary
    CFDictionaryRef diction = CFArrayGetValueAtIndex(outputs, 0);

    // get the output description from the dictionary
    CFStringRef output = CFDictionaryGetValue(diction, kAudioSession_AudioRouteKey_Type);
    return [NSString stringWithFormat:@"%@", output];
}

+ (NSString*) getAudioSessionRoute {
    /*
     returns the current session route:
     * ReceiverAndMicrophone
     * HeadsetInOut
     * Headset
     * HeadphonesAndMicrophone
     * Headphone
     * SpeakerAndMicrophone
     * Speaker
     * HeadsetBT
     * LineInOut
     * Lineout
     * Default
    */

    UInt32 rSize = sizeof(CFStringRef);
    CFStringRef route = NULL; // initialized so the NULL check below is reliable if the call fails
    AudioSessionGetProperty (kAudioSessionProperty_AudioRoute, &rSize, &route);

    if (route == NULL) {
        NSLog(@"Silent switch is currently on");
        return @"None";
    }
    return [NSString stringWithFormat:@"%@", route];
}

@end
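
As a usage sketch (where these are called from is an assumption; any point after launch works):

// e.g. in application:didFinishLaunchingWithOptions: or a view controller's viewDidLoad
[AudioRouter initAudioSessionRouting];      // route everything to the built-in speaker

// later, hand routing decisions back to the system:
[AudioRouter switchToDefaultHardware];

// or force the built-in speaker again:
[AudioRouter forceOutputToBuiltInSpeakers];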