Java 权限拒绝错误 - SpeechRecognizer 作为连续服务?(android.permission.INTERACT_ACROSS_USERS_FULL)

Java 权限拒绝错误-SpeechRecognizer作为连续服务?(android.permission.interaction\u跨用户\u FULL),java,android,speech-recognition,Java,Android,Speech Recognition,编辑: 我已将服务代码更改为实现已启动的服务,而不是以下更新的StreamService.java中的IntentService 现在,我得到了关于权限拒绝错误的错误,如StreamService.java之后的logcat消息中所述 package com.example.speechsensorservice; import java.util.ArrayList; import android.app.Service; import android.content.Broadc

编辑: 我已将服务代码更改为实现已启动的服务,而不是以下更新的StreamService.java中的IntentService 现在,我得到了关于权限拒绝错误的错误,如StreamService.java之后的logcat消息中所述

    package com.example.speechsensorservice;

import java.util.ArrayList;

import android.app.Service;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Bundle;
import android.os.IBinder;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;

/**
 * Started (non-bound) service that runs Android's {@link SpeechRecognizer}
 * and broadcasts recognized text to interested receivers (e.g. MainActivity).
 * Streaming stops automatically when the wired headset is unplugged.
 */
public class StreamService extends Service {
     private static final String TAG = "SpeechSensor";
     /** Broadcast action used to deliver recognition results. */
     private static final String ACTION = "com.example.speechsensorservice.TEXT";
     /** Extra key carrying the recognized text; must match what receivers read. */
     private static final String EXTRA_IDENTITY = "Identity";

    private SpeechRecognizer sr;

    private BroadcastReceiver sReceiver;

    // Assume the headset starts out connected; we stop once it is unplugged.
    private boolean headsetConnected = true;

    String text;


    @Override
    public IBinder onBind(Intent arg0) {
        // Started service only — no binding supported.
        return null;
    }

    @Override
    public void onCreate() {
        Log.d(TAG, "onCreate() StreamService Method");
        super.onCreate();
        // Stop streaming as soon as the headset is unplugged (state == 0).
        sReceiver = new BroadcastReceiver() {
            @Override
            public void onReceive(Context context, Intent intent) {
                if ( Intent.ACTION_HEADSET_PLUG.equals(intent.getAction())
                        && intent.hasExtra("state")
                        && headsetConnected
                        && intent.getIntExtra("state", 0) == 0 ) {
                    headsetConnected = false;
                    stopStreaming();
                }
            }
        };
        this.registerReceiver(sReceiver, new IntentFilter(Intent.ACTION_HEADSET_PLUG));
    }

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        Log.d(TAG,"Inside onStartCommand()");
        // SpeechRecognizer must be driven from the main thread, so listen
        // directly here instead of spawning a worker thread.
        startStreaming();
        return Service.START_STICKY;
    }

    @Override
    public  void onDestroy() {
        Log.d(TAG, "onDestroy() StreamService Method");
        super.onDestroy();
        this.unregisterReceiver(this.sReceiver);
        // Release the recognizer if it is still active to avoid leaking it.
        if ( sr != null ) {
            sr.destroy();
            sr = null;
        }
    }


     /**
      * Starts listening for speech, or broadcasts "NA" when recognition is
      * unavailable on this device.
      */
     public void startStreaming() {
         Log.d(TAG, "Inside startStreaming()");
            Intent intent;
            text = "";
            if ( !SpeechRecognizer.isRecognitionAvailable(this) ) {
                Log.d(TAG, "Not Applicable with your device");
                text = "NA";
                intent = new Intent(ACTION);
                intent.putExtra(EXTRA_IDENTITY, text);
                sendBroadcast(intent);
            }
            else {
                Log.d(TAG, "started taking input");
                sr = SpeechRecognizer.createSpeechRecognizer(this.getApplicationContext());

                intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

                // BUG FIX: EXTRA_LANGUAGE_MODEL expects a model constant, not a
                // locale tag. The locale goes in EXTRA_LANGUAGE instead.
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                        RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");

                sr.setRecognitionListener( new mylistener());
                sr.startListening(intent);
            }

     }

     /** Cancels and releases the recognizer, then stops the service. */
     public void stopStreaming() {
            if ( sr == null ) return;
            Log.d(TAG, "stopped taking input");
            sr.cancel();
            sr.destroy();
            sr = null;
            this.stopSelf();
     }

     /** @return true while a recognizer instance is active. */
     public boolean isStreaming() {
            Log.d(TAG,"isStreaming : YES");
            return sr != null;
     }

     /** Receives recognizer callbacks and re-broadcasts the top result. */
     class mylistener implements RecognitionListener {

            @Override
            public void onBeginningOfSpeech() {
                Log.d(TAG, "onBeginningOfSpeech");
            }

            @Override
            public void onBufferReceived(byte[] arg0) {
                // No-op: raw audio buffers are not used.
            }

            @Override
            public void onEndOfSpeech() {
                Log.d(TAG, "onEndOfSpeech");
            }

            @Override
            public void onError(int error) {
                // BUG FIX: was silently swallowed; log the code so failures
                // (network, no-match, insufficient permissions) are visible.
                Log.d(TAG, "onError code=" + error);
            }

            @Override
            public void onEvent(int arg0, Bundle arg1) {
                // No-op: reserved for future recognizer events.
            }

            @Override
            public void onPartialResults(Bundle arg0) {
                // No-op: only final results are broadcast.
            }

            @Override
            public void onReadyForSpeech(Bundle arg0) {
                Log.d(TAG, "onReadyForSpeech");
            }

            @Override
            public void onResults(Bundle results) {
                Log.d(TAG, "Got Results");
                ArrayList<String> al =
                        results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                // BUG FIX: guard against a null/empty result list before get(0).
                if ( al == null || al.isEmpty() ) {
                    Log.d(TAG, "No recognition results");
                    return;
                }
                text = al.get(0);
                for ( int i = 0; i < al.size(); i++ ) {
                    Log.d(TAG,"result=" + al.get(i));
                }
                // BUG FIX: was "Identifier", but MainActivity reads "Identity",
                // so recognized text never reached the UI.
                Intent intent = new Intent(ACTION);
                intent.putExtra(EXTRA_IDENTITY, text);
                sendBroadcast(intent);
            }

            @Override
            public void onRmsChanged(float arg0) {
                // No-op: input volume changes are not displayed.
            }

        }

}
编辑:

As mentioned on the Android Developer site, the SpeechRecognizer API can only be used with an Application Context. Is there any workaround with which I can get it working?
我已经实现了包含所有UI组件的MainActivity类。课程安排如下

代码-MainActivity.java

package com.example.speechsensorservice;

import android.app.Activity;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.View;
import android.widget.ImageButton;
import android.widget.TextView;
import android.widget.Toast;

/**
 * UI entry point: starts {@link StreamService} when the mic button is tapped
 * or a headset is plugged in, and displays recognition results broadcast by
 * the service on the "com.example.speechsensorservice.TEXT" action.
 */
public class MainActivity extends Activity {


    private static final String TAG = "SpeechSensor";

    // Tracks headset state so the service is started only on plug-in edges.
    private boolean headsetConnected = false;

    public TextView txtText;

    private BroadcastReceiver mReceiver;
    private ImageButton btnSpeak;

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        txtText = (TextView) findViewById(R.id.txtText);
        btnSpeak = (ImageButton) findViewById(R.id.btnSpeak);

        // Manual trigger: start the speech service on button tap.
        btnSpeak.setOnClickListener(new View.OnClickListener() {

            @Override
            public void onClick(View v) {
                Intent intent = new Intent(getApplicationContext(),StreamService.class);
                startService(intent);
            }
        });
    }

    @Override
    protected void onResume() {
        super.onResume();

        // Listen for both headset plug events and service result broadcasts
        // only while the activity is in the foreground.
        IntentFilter sIF = new IntentFilter();
        sIF.addAction(Intent.ACTION_HEADSET_PLUG);
        sIF.addAction("com.example.speechsensorservice.TEXT");
        mReceiver = new BroadcastReceiver() {

                @Override
            public void onReceive(Context arg0, Intent arg1) {
                String act = arg1.getAction();
                Log.d(TAG, "Received Action = " + act);
                if ( Intent.ACTION_HEADSET_PLUG.equals(act) ) {
                    if ( arg1.hasExtra("state")) {
                        // state == 1 means plugged in; react only on transition.
                        if ( !headsetConnected && arg1.getIntExtra("state", 0) == 1 ) {
                            headsetConnected = true;
                            txtText.setText("Headset Plugged in");
                            startNoiseProcessService();
                        }
                    }
                }
                else if ( act.equals("com.example.speechsensorservice.TEXT") ){
                    if ( arg1.hasExtra("Identity")) {
                        String s = arg1.getStringExtra("Identity");
                        // "NA" is the service's sentinel for "recognition
                        // unavailable on this device".
                        if ( s.equals("NA") ) {
                            Toast t = Toast.makeText(getApplicationContext(),
                                    "Your Device does not support Speech to Text",
                                    Toast.LENGTH_SHORT);
                            t.show();
                        }
                        else txtText.setText(s);
                    }
                }
            }

        };

        this.registerReceiver(mReceiver, sIF);
    }

    @Override
    protected void onPause() {
        super.onPause();
        this.unregisterReceiver(this.mReceiver);
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
       getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    /** Starts the background speech-recognition service. */
    public void startNoiseProcessService() {
        Intent intent = new Intent(this,StreamService.class);
        startService(intent);
    }


}
我实现的另一个类通过继承IntentService类作为后台任务启动语音识别服务。具体实施如下

代码-StreamService.java

    package com.example.speechsensorservice;

import java.util.ArrayList;

import android.app.Service;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Bundle;
import android.os.IBinder;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;

/**
 * Started (non-bound) service that runs Android's {@link SpeechRecognizer}
 * and broadcasts recognized text to interested receivers (e.g. MainActivity).
 * Streaming stops automatically when the wired headset is unplugged.
 */
public class StreamService extends Service {
     private static final String TAG = "SpeechSensor";
     /** Broadcast action used to deliver recognition results. */
     private static final String ACTION = "com.example.speechsensorservice.TEXT";
     /** Extra key carrying the recognized text; must match what receivers read. */
     private static final String EXTRA_IDENTITY = "Identity";

    private SpeechRecognizer sr;

    private BroadcastReceiver sReceiver;

    // Assume the headset starts out connected; we stop once it is unplugged.
    private boolean headsetConnected = true;

    String text;


    @Override
    public IBinder onBind(Intent arg0) {
        // Started service only — no binding supported.
        return null;
    }

    @Override
    public void onCreate() {
        Log.d(TAG, "onCreate() StreamService Method");
        super.onCreate();
        // Stop streaming as soon as the headset is unplugged (state == 0).
        sReceiver = new BroadcastReceiver() {
            @Override
            public void onReceive(Context context, Intent intent) {
                if ( Intent.ACTION_HEADSET_PLUG.equals(intent.getAction())
                        && intent.hasExtra("state")
                        && headsetConnected
                        && intent.getIntExtra("state", 0) == 0 ) {
                    headsetConnected = false;
                    stopStreaming();
                }
            }
        };
        this.registerReceiver(sReceiver, new IntentFilter(Intent.ACTION_HEADSET_PLUG));
    }

    @Override
    public int onStartCommand(Intent intent, int flags, int startId) {
        Log.d(TAG,"Inside onStartCommand()");
        // SpeechRecognizer must be driven from the main thread, so listen
        // directly here instead of spawning a worker thread.
        startStreaming();
        return Service.START_STICKY;
    }

    @Override
    public  void onDestroy() {
        Log.d(TAG, "onDestroy() StreamService Method");
        super.onDestroy();
        this.unregisterReceiver(this.sReceiver);
        // Release the recognizer if it is still active to avoid leaking it.
        if ( sr != null ) {
            sr.destroy();
            sr = null;
        }
    }


     /**
      * Starts listening for speech, or broadcasts "NA" when recognition is
      * unavailable on this device.
      */
     public void startStreaming() {
         Log.d(TAG, "Inside startStreaming()");
            Intent intent;
            text = "";
            if ( !SpeechRecognizer.isRecognitionAvailable(this) ) {
                Log.d(TAG, "Not Applicable with your device");
                text = "NA";
                intent = new Intent(ACTION);
                intent.putExtra(EXTRA_IDENTITY, text);
                sendBroadcast(intent);
            }
            else {
                Log.d(TAG, "started taking input");
                sr = SpeechRecognizer.createSpeechRecognizer(this.getApplicationContext());

                intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

                // BUG FIX: EXTRA_LANGUAGE_MODEL expects a model constant, not a
                // locale tag. The locale goes in EXTRA_LANGUAGE instead.
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                        RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
                intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, "en-US");

                sr.setRecognitionListener( new mylistener());
                sr.startListening(intent);
            }

     }

     /** Cancels and releases the recognizer, then stops the service. */
     public void stopStreaming() {
            if ( sr == null ) return;
            Log.d(TAG, "stopped taking input");
            sr.cancel();
            sr.destroy();
            sr = null;
            this.stopSelf();
     }

     /** @return true while a recognizer instance is active. */
     public boolean isStreaming() {
            Log.d(TAG,"isStreaming : YES");
            return sr != null;
     }

     /** Receives recognizer callbacks and re-broadcasts the top result. */
     class mylistener implements RecognitionListener {

            @Override
            public void onBeginningOfSpeech() {
                Log.d(TAG, "onBeginningOfSpeech");
            }

            @Override
            public void onBufferReceived(byte[] arg0) {
                // No-op: raw audio buffers are not used.
            }

            @Override
            public void onEndOfSpeech() {
                Log.d(TAG, "onEndOfSpeech");
            }

            @Override
            public void onError(int error) {
                // BUG FIX: was silently swallowed; log the code so failures
                // (network, no-match, insufficient permissions) are visible.
                Log.d(TAG, "onError code=" + error);
            }

            @Override
            public void onEvent(int arg0, Bundle arg1) {
                // No-op: reserved for future recognizer events.
            }

            @Override
            public void onPartialResults(Bundle arg0) {
                // No-op: only final results are broadcast.
            }

            @Override
            public void onReadyForSpeech(Bundle arg0) {
                Log.d(TAG, "onReadyForSpeech");
            }

            @Override
            public void onResults(Bundle results) {
                Log.d(TAG, "Got Results");
                ArrayList<String> al =
                        results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                // BUG FIX: guard against a null/empty result list before get(0).
                if ( al == null || al.isEmpty() ) {
                    Log.d(TAG, "No recognition results");
                    return;
                }
                text = al.get(0);
                for ( int i = 0; i < al.size(); i++ ) {
                    Log.d(TAG,"result=" + al.get(i));
                }
                // BUG FIX: was "Identifier", but MainActivity reads "Identity",
                // so recognized text never reached the UI.
                Intent intent = new Intent(ACTION);
                intent.putExtra(EXTRA_IDENTITY, text);
                sendBroadcast(intent);
            }

            @Override
            public void onRmsChanged(float arg0) {
                // No-op: input volume changes are not displayed.
            }

        }

}

这个问题是自我解决的,logCat中的第一行给出的解决方案是,当前线程没有执行用户任务的权限,所以只需在manifest中添加以下权限,看看是否有效

<uses-permission android:name="android.permission.INTERACT_ACROSS_USERS_FULL" />


让我知道我是否正确理解了这个问题

有时候,这个特定的错误实际上是误导性的,并且是由其他运行时问题引起的

我记录了一个这样的例子——一个抛出的NullPointerException最终被报告为同一个错误,尽管它与跨用户权限无关

在我的特定案例中,ProGuard剥离了我需要的一个方法,这导致抛出一个NullPointerException。堆栈跟踪如下所示:

Permission Denial: get/set setting for user asks to run as user -2 but is calling from user 0; this requires android.permission.INTERACT_ACROSS_USERS_FULL
java.lang.NullPointerException
 at java.lang.Enum$1.create(Enum.java:43)
 at java.lang.Enum$1.create(Enum.java:35)
 at libcore.util.BasicLruCache.get(BasicLruCache.java:54)
 at java.lang.Enum.getSharedConstants(Enum.java:209)
 at java.lang.Enum.valueOf(Enum.java:189)
 at com.my.app.package.b.c.a(Unknown Source)
 at com.my.app.package.b.a.onCreate(Unknown Source)
 at android.support.v4.app.FragmentManagerImpl.moveToState(Unknown Source)
 at android.support.v4.app.FragmentManagerImpl.moveToState(Unknown Source)
 at android.support.v4.app.BackStackRecord.run(Unknown Source)
 at android.support.v4.app.FragmentManagerImpl.execPendingActions(Unknown Source)
 at android.support.v4.app.FragmentManagerImpl$1.run(Unknown Source)
 at android.os.Handler.handleCallback(Handler.java:730)
 at android.os.Handler.dispatchMessage(Handler.java:92)
 at android.os.Looper.loop(Looper.java:137)
 at android.app.ActivityThread.main(ActivityThread.java:5455)
 at java.lang.reflect.Method.invokeNative(Native Method)
 at java.lang.reflect.Method.invoke(Method.java:525)
 at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:1187)
 at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:1003)
 at dalvik.system.NativeStart.main(Native Method)
我完全不知道为什么Android会将NullPointerException转换为android.permission.INTERACT_ACROSS_USERS_FULL错误,但显而易见的解决方案是调整ProGuard配置,这样方法就不会被剥离

我调用的方法在枚举上没有“valueOf”方法。事实证明,在引擎盖下有一些有趣的反射(我在上面的链接中进行了介绍),但我的解决方案是将以下内容添加到我的ProGuard配置中

# Keep the compiler-generated values()/valueOf(String) members of every enum:
# Enum.valueOf resolves them reflectively at runtime, so stripping them causes
# a NullPointerException that Android can misreport as a permission denial.
-keepclassmembers enum * {
    public static **[] values();
    public static ** valueOf(java.lang.String);
}

文档上写着“工作线程”。使用常规维修并覆盖
onStartCommand
静止,不工作。是否有任何方法可以使用主线程上下文(即UI线程)运行SpeechRecognizer实例?除了
IntentService
中的
onHandleIntent
之外,
服务的所有方法都在主线程中执行。请参见我在下面的回答,但它仍在请求android.permission.INTERACT_ACROSS_USERS_FULL。此权限是签名级别的权限,只有当应用的签名与系统相同时才能使用——这对于典型的应用程序开发人员来说是不可行的。请注意,仅贴链接是不鼓励的,因此答案应该是搜索解决方案的终点(相对于另一个引用的站点,它会随着时间的推移而变得陈旧)。请考虑在这里添加一个独立的概要,保持链接作为引用。