android.speech.SpeechRecognizer Source Code Examples

Listed below are code examples of android.speech.SpeechRecognizer drawn from open-source projects; you can also follow each example's link to GitHub to view the full source.
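Before the project excerpts, here is a minimal self-contained sketch of the typical SpeechRecognizer flow. The class name and log tag are illustrative rather than taken from any of the projects below, the RECORD_AUDIO runtime permission is assumed to have been granted, and the calls must run on the main thread, as the platform requires.

import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;

import java.util.ArrayList;

/** Minimal SpeechRecognizer wiring; illustrative, not from the projects below. */
public class SpeechSketch {

    public static SpeechRecognizer startListening(Context context) {
        SpeechRecognizer recognizer = SpeechRecognizer.createSpeechRecognizer(context);
        recognizer.setRecognitionListener(new RecognitionListener() {
            @Override public void onResults(Bundle results) {
                ArrayList<String> matches =
                        results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
                if (matches != null && !matches.isEmpty()) {
                    // Index 0 holds the match with the highest confidence score.
                    Log.i("SpeechSketch", "Best match: " + matches.get(0));
                }
            }
            @Override public void onError(int error) {
                Log.w("SpeechSketch", "Recognition error code: " + error);
            }
            // The remaining callbacks are intentionally no-ops in this sketch.
            @Override public void onReadyForSpeech(Bundle params) {}
            @Override public void onBeginningOfSpeech() {}
            @Override public void onRmsChanged(float rmsdB) {}
            @Override public void onBufferReceived(byte[] buffer) {}
            @Override public void onEndOfSpeech() {}
            @Override public void onPartialResults(Bundle partialResults) {}
            @Override public void onEvent(int eventType, Bundle params) {}
        });

        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        recognizer.startListening(intent);
        return recognizer; // the caller is responsible for calling destroy()
    }
}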

Example 1 - Project: jellyfin-androidtv - File: SearchActivity.java
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);

    boolean isSpeechEnabled = SpeechRecognizer.isRecognitionAvailable(this);

    // Determine fragment to use
    Fragment searchFragment = isSpeechEnabled
            ? new LeanbackSearchFragment()
            : new TextSearchFragment();

    // Add fragment
    getSupportFragmentManager()
            .beginTransaction()
            .replace(android.R.id.content, searchFragment)
            .commit();
}
 
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);

    if (requestCode == REQUEST_CODE && resultCode == RESULT_OK) {

        // The match with the highest confidence score is at index 0.
        ArrayList<String> matches = data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);

        if (matches != null && matches.size() > 0) {
            String sentence = matches.get(0);
            speechRecognitionListener.getOnSpeechRecognitionListener()
                    .OnSpeechRecognitionFinalResult(sentence);

            return;
        }
    }

    speechRecognitionListener.onError(SpeechRecognizer.ERROR_NO_MATCH);
}
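
For context, a hedged sketch of the counterpart call that produces this callback; REQUEST_CODE is assumed to be the same constant the handler checks, and the prompt string is purely illustrative.

// Launching the system speech dialog whose result onActivityResult() consumes.
Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
        RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
intent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Speak now"); // illustrative prompt
startActivityForResult(intent, REQUEST_CODE);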
 
Example 3 - Project: Saiy-PS - File: RecognitionGoogleCloud.java
private void showPlayServicesError(final int errorCode) {
    if (DEBUG) {
        MyLog.i(CLS_NAME, "showPlayServicesError");
    }

    onError(SpeechRecognizer.ERROR_CLIENT);

    switch (errorCode) {

        case UNRECOVERABLE:
            // TODO
            break;
        default:
            final GoogleApiAvailability apiAvailability = GoogleApiAvailability.getInstance();
            apiAvailability.showErrorNotification(mContext, errorCode);
            break;
    }
}
 
Example 4 - Project: Saiy-PS - File: RecognitionGoogleCloud.java
/**
 * Receives a terminating error from the stream.
 * <p>May only be called once, and if called it must be the last method called. In particular, if
 * an exception is thrown by an implementation of {@code onError}, no further calls to any method
 * are allowed.
 *
 * <p>{@code t} should be a {@link StatusException} or {@link
 * StatusRuntimeException}, but other {@code Throwable} types are possible. Callers should
 * generally convert from a {@link Status} via {@link Status#asException()} or
 * {@link Status#asRuntimeException()}. Implementations should generally convert to a
 * {@code Status} via {@link Status#fromThrowable(Throwable)}.
 *
 * @param throwable the error that occurred on the stream
 */
@Override
public void onError(final Throwable throwable) {
    if (DEBUG) {
        MyLog.w(CLS_NAME, "onError");
        throwable.printStackTrace();
        final Status status = Status.fromThrowable(throwable);
        MyLog.w(CLS_NAME, "onError: " + status.toString());
    }

    if (doError.get()) {
        doError.set(false);
        stopListening();
        listener.onError(SpeechRecognizer.ERROR_NETWORK);
    }
}
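
As a footnote to the javadoc's conversion contract, both directions are single calls in gRPC's Status API; the variable names here are illustrative.

// Illustrative only: the Throwable/Status conversions described above.
final Status status = Status.fromThrowable(throwable);          // implementation side
final StatusRuntimeException sre = status.asRuntimeException(); // caller side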
 
Example 5 - Project: Saiy-PS - File: SaiyRecognitionListener.java
/**
 * A network or recognition error occurred.
 *
 * @param error code is defined in {@link SpeechRecognizer}
 */
@Override
public void onError(final int error) {
    if (DEBUG) {
        MyLog.w(CLS_NAME, "onError: " + error);
        MyLog.w(CLS_NAME, "onError: doEndOfSpeech: " + doEndOfSpeech);
        MyLog.w(CLS_NAME, "onError: doError: " + doError);
        MyLog.i(CLS_NAME, "onError: doBeginningOfSpeech: " + doBeginningOfSpeech);
    }

    if (error != SpeechRecognizer.ERROR_NO_MATCH) {
        doError = true;
    }

    if (doError) {
        onRecognitionError(error);
    }
}
 
Example 6 - Project: Saiy-PS - File: SaiyAccessibilityService.java
/**
 * Process the extracted text as identified as a command
 *
 * @param text the command to process
 */
private void process(@NonNull final String text) {
    if (DEBUG) {
        MyLog.i(CLS_NAME, "process");
    }

    final Bundle bundle = new Bundle();

    final ArrayList<String> voiceResults = new ArrayList<>(1);
    voiceResults.add(text);

    final float[] confidence = new float[1];
    confidence[0] = 1f;

    bundle.putStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION, voiceResults);
    bundle.putFloatArray(SpeechRecognizer.CONFIDENCE_SCORES, confidence);
    bundle.putInt(LocalRequest.EXTRA_CONDITION, Condition.CONDITION_GOOGLE_NOW);

    AsyncTask.execute(new Runnable() {
        @Override
        public void run() {
            new RecognitionAction(SaiyAccessibilityService.this.getApplicationContext(), SPH.getVRLocale(SaiyAccessibilityService.this.getApplicationContext()),
                    SPH.getTTSLocale(SaiyAccessibilityService.this.getApplicationContext()), sl, bundle);
        }
    });
}
 
Example 7 - Project: android-speech - File: Speech.java
@Override
public void onPartialResults(final Bundle bundle) {
    mDelayedStopListening.resetTimer();

    final List<String> partialResults = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    final List<String> unstableData = bundle.getStringArrayList("android.speech.extra.UNSTABLE_TEXT");

    if (partialResults != null && !partialResults.isEmpty()) {
        mPartialData.clear();
        mPartialData.addAll(partialResults);
        mUnstableData = unstableData != null && !unstableData.isEmpty()
                ? unstableData.get(0) : null;
        try {
            if (mLastPartialResults == null || !mLastPartialResults.equals(partialResults)) {
                if (mDelegate != null)
                    mDelegate.onSpeechPartialResults(partialResults);
                mLastPartialResults = partialResults;
            }
        } catch (final Throwable exc) {
            Logger.error(Speech.class.getSimpleName(),
                    "Unhandled exception in delegate onSpeechPartialResults", exc);
        }
    }
}
 
@Override
public void initialize(CordovaInterface cordova, CordovaWebView webView) {
  super.initialize(cordova, webView);

  activity = cordova.getActivity();
  context = webView.getContext();
  view = webView.getView();

  view.post(new Runnable() {
    @Override
    public void run() {
      recognizer = SpeechRecognizer.createSpeechRecognizer(activity);
      SpeechRecognitionListener listener = new SpeechRecognitionListener();
      recognizer.setRecognitionListener(listener);
    }
  });
}
 
@Override
public void onPartialResults(Bundle bundle) {
  ArrayList<String> matches = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
  Log.d(LOG_TAG, "SpeechRecognitionListener partialResults: " + matches);
  try {
    if (matches != null && matches.size() > 0) {
      JSONArray matchesJSON = new JSONArray(matches);
      if (!mLastPartialResults.equals(matchesJSON)) {
        mLastPartialResults = matchesJSON;
        PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, matchesJSON);
        pluginResult.setKeepCallback(true);
        callbackContext.sendPluginResult(pluginResult);
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
    callbackContext.error(e.getMessage());
  }
}
 
Example 10 - Project: iqra-android - File: MainActivity.java
@Override
public void onResults(Bundle results) {
    mIsListening = false;
    micText.setText(getString(R.string.tap_on_mic));
    recordCircle.getLayoutParams().width = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 80, getResources().getDisplayMetrics());
    recordCircle.getLayoutParams().height = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 80, getResources().getDisplayMetrics());
    recordCircle.requestLayout();
    recordCircle.setImageResource(R.drawable.record_circle_inactive);
    partialResult.setText("");
    // Log.d(TAG, "onResults"); //$NON-NLS-1$
    ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    // matches are the return values of speech recognition engine
    if (matches != null) {
        // Log.d(TAG, matches.toString()); //$NON-NLS-1$
        callApi(matches.get(0));
    } else {
        Toast.makeText(getApplicationContext(), getResources().getString(R.string.cannot_understand), Toast.LENGTH_SHORT).show();
    }
}
 
Example 11 - Project: android-chromium - File: SpeechRecognition.java
private void handleResults(Bundle bundle, boolean provisional) {
    if (mContinuous && provisional) {
        // In continuous mode, Android's recognizer sends final results as provisional.
        provisional = false;
    }

    ArrayList<String> list = bundle.getStringArrayList(
            SpeechRecognizer.RESULTS_RECOGNITION);
    String[] results = list.toArray(new String[list.size()]);

    float[] scores = bundle.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES);

    nativeOnRecognitionResults(mNativeSpeechRecognizerImplAndroid,
                               results,
                               scores,
                               provisional);
}
 
Example 12 - Project: 365browser - File: SpeechRecognition.java
private void handleResults(Bundle bundle, boolean provisional) {
    if (mContinuous && provisional) {
        // In continuous mode, Android's recognizer sends final results as provisional.
        provisional = false;
    }

    ArrayList<String> list = bundle.getStringArrayList(
            SpeechRecognizer.RESULTS_RECOGNITION);
    String[] results = list.toArray(new String[list.size()]);

    float[] scores = bundle.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES);

    nativeOnRecognitionResults(mNativeSpeechRecognizerImplAndroid,
                               results,
                               scores,
                               provisional);
}
 
Example 13 - Project: 365browser - File: SpeechRecognition.java
private SpeechRecognition(long nativeSpeechRecognizerImplAndroid) {
    mContinuous = false;
    mNativeSpeechRecognizerImplAndroid = nativeSpeechRecognizerImplAndroid;
    mListener = new Listener();
    mIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

    if (sRecognitionProvider != null) {
        mRecognizer = SpeechRecognizer.createSpeechRecognizer(
                ContextUtils.getApplicationContext(), sRecognitionProvider);
    } else {
        // It is possible to force-enable the speech recognition web platform feature (using a
        // command-line flag) even if initialize() failed to find the PROVIDER_PACKAGE_NAME
        // provider, in which case the first available speech recognition provider is used.
        // Caveat: Continuous mode may not work as expected with a different provider.
        mRecognizer =
                SpeechRecognizer.createSpeechRecognizer(ContextUtils.getApplicationContext());
    }

    mRecognizer.setRecognitionListener(mListener);
}
 
protected void initializeRecognizer() {
    if (speechRecognizer != null) {
        return;
    }

    synchronized (speechRecognizerLock) {
        if (speechRecognizer != null) {
            speechRecognizer.destroy();
            speechRecognizer = null;
        }

        final ComponentName component = RecognizerChecker.findGoogleRecognizer(context);
        speechRecognizer = SpeechRecognizer.createSpeechRecognizer(context, component);
        speechRecognizer.setRecognitionListener(new InternalRecognitionListener());
    }
}
 
Example 16 - Project: android-chromium - File: SpeechRecognition.java
private SpeechRecognition(final Context context, int nativeSpeechRecognizerImplAndroid) {
    mContext = context;
    mContinuous = false;
    mNativeSpeechRecognizerImplAndroid = nativeSpeechRecognizerImplAndroid;
    mListener = new Listener();
    mIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);

    if (mRecognitionProvider != null) {
        mRecognizer = SpeechRecognizer.createSpeechRecognizer(mContext, mRecognitionProvider);
    } else {
        // It is possible to force-enable the speech recognition web platform feature (using a
        // command-line flag) even if initialize() failed to find the PROVIDER_PACKAGE_NAME
        // provider, in which case the first available speech recognition provider is used.
        // Caveat: Continuous mode may not work as expected with a different provider.
        mRecognizer = SpeechRecognizer.createSpeechRecognizer(mContext);
    }

    mRecognizer.setRecognitionListener(mListener);
}
 
Example 17 - Project: adt-leanback-support - File: SearchBar.java
/**
 * Set the speech recognizer to be used when doing voice search. The Activity/Fragment is in
 * charge of creating and destroying the recognizer with its own lifecycle.
 *
 * @param recognizer a SpeechRecognizer
 */
public void setSpeechRecognizer(SpeechRecognizer recognizer) {
    if (null != mSpeechRecognizer) {
        mSpeechRecognizer.setRecognitionListener(null);
        if (mListening) {
            mSpeechRecognizer.cancel();
            mListening = false;
        }
    }
    mSpeechRecognizer = recognizer;
    if (mSpeechRecognizer != null) {
        enforceAudioRecordPermission();
    }
    if (mSpeechRecognitionCallback != null && mSpeechRecognizer != null) {
        throw new IllegalStateException("Can't have speech recognizer and request");
    }
}
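
A plausible caller, per the javadoc's lifecycle contract: create the recognizer in onResume() and release it in onPause(). The mSearchBar and mRecognizer fields and the hosting Activity are assumptions for illustration, not code from adt-leanback-support.

private SpeechRecognizer mRecognizer;

@Override
protected void onResume() {
    super.onResume();
    mRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSearchBar.setSpeechRecognizer(mRecognizer);
}

@Override
protected void onPause() {
    mSearchBar.setSpeechRecognizer(null); // detach before releasing
    mRecognizer.destroy();                // the caller owns the recognizer lifecycle
    mRecognizer = null;
    super.onPause();
}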
 
@Override
public void onResults(Bundle results) {
    ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
    if (data != null && data.size() > 0) {
        String query = data.get(0);
        if (listener != null) {
            listener.onVoiceRecognitionComplete(query);
        }
    } else {
        if (listener != null) {
            listener.onError(0);
        }
    }
}
 
private void initializeSpeechRecognitionParameters() {

    if (!isSpeechRecognitionAvailable())
        throw new IllegalStateException(context.getString(R.string.speech_not_enabled_exception_text));

    /*
     * Initialize the SpeechRecognitionPermissions and google IME here
     * so the fragments are lazily loaded.
     */
    initializeGoogleVoiceImeParameters();
    speechRecognitionPermissions = new SpeechRecognitionPermissions();
    ((Activity) context).getFragmentManager()
            .beginTransaction()
            .add(speechRecognitionPermissions, SpeechRecognition.class.getSimpleName())
            .commit();

    /*
     * Initialize the SpeechRecognizer; the onSpeechRecognizerListener
     * implemented by the client is attached as its listener.
     */
    speechRecognizer = SpeechRecognizer.createSpeechRecognizer(context);

    /*
     * Initialize the speech recognition intent with the default language.
     */
    recognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, context.getPackageName());
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, MAX_RESULT_COUNT);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);

    /*
     * Offline-only recognition (EXTRA_PREFER_OFFLINE) is only available
     * from API level 23.
     */
    if (enableOnlyOfflineRecognition && Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
        recognizerIntent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, true);
    }

    //TODO: Set preferred Speech recognition Language
}
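
A start call following this initialization would presumably look like the fragment below; the method name and the listener wiring are assumptions for illustration, not code from the same project.

// Hypothetical follow-up: start listening with the intent prepared above.
public void startSpeechRecognition(OnSpeechRecognitionListener listener) {
    speechRecognizer.setRecognitionListener(new SpeechRecognitionListener(listener));
    speechRecognizer.startListening(recognizerIntent);
}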
 
@Override
public void onResults(Bundle bundle) {

    //sentence with highest confidence score is in position 0
    ArrayList<String> matches = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);

    if (matches != null && matches.size() > 0) {
        String sentence = matches.get(0);

        Log.i(SpeechRecognitionListener.class.getSimpleName(), sentence);
        onSpeechRecognitionListener.OnSpeechRecognitionFinalResult(sentence);

    } else onError(SpeechRecognizer.ERROR_NO_MATCH);
}
 
@Override
public void onPartialResults(Bundle bundle) {
    //sentence with highest confidence score is in position 0
    ArrayList<String> matches = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);

    if (matches != null && matches.size() > 0) {
        String word = matches.get(0);

        Log.i(SpeechRecognitionListener.class.getSimpleName(), word);
        onSpeechRecognitionListener.OnSpeechRecognitionCurrentResult(word);

    } else onError(SpeechRecognizer.ERROR_NO_MATCH);
}
 
Example 22 - Project: Amadeus - File: MainActivity.java
public void onResults(Bundle results) {
    String input = "";
    String debug = "";
    Log.d(TAG, "Received results");
    ArrayList<String> data = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);

    for (String word : data) {
        debug += word + "\n";
    }
    Log.d(TAG, debug);

    input += data.get(0);
    /* TODO: Japanese doesn't split the words. Sigh. */
    String[] splitInput = input.split(" ");

    /* Really, google? */
    if (splitInput[0].equalsIgnoreCase("Асистент")) {
        splitInput[0] = "Ассистент";
    }

    /* Switch language within current context for voice recognition */
    Context context = LangContext.load(getApplicationContext(), contextLang[0]);

    if (splitInput.length > 2 && splitInput[0].equalsIgnoreCase(context.getString(R.string.assistant))) {
        String cmd = splitInput[1].toLowerCase();
        String[] args = new String[splitInput.length - 2];
        System.arraycopy(splitInput, 2, args, 0, splitInput.length - 2);

        if (cmd.contains(context.getString(R.string.open))) {
            Amadeus.openApp(args, MainActivity.this);
        }

    } else {
        Amadeus.responseToInput(input, context, MainActivity.this);
    }
}
 
Example 23 - Project: o2oa - File: ErrorTranslation.java
public static String recogError(int errorCode) {
    String message;
    switch (errorCode) {
        case SpeechRecognizer.ERROR_AUDIO:
            message = "音频问题"; // audio recording error
            break;
        case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
            message = "没有语音输入"; // no speech input
            break;
        case SpeechRecognizer.ERROR_CLIENT:
            message = "其它客户端错误"; // other client-side error
            break;
        case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
            message = "权限不足"; // insufficient permissions
            break;
        case SpeechRecognizer.ERROR_NETWORK:
            message = "网络问题"; // network error
            break;
        case SpeechRecognizer.ERROR_NO_MATCH:
            message = "没有匹配的识别结果"; // no recognition match
            break;
        case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
            message = "引擎忙"; // recognizer is busy
            break;
        case SpeechRecognizer.ERROR_SERVER:
            message = "服务端错误"; // server-side error
            break;
        case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
            message = "连接超时"; // network operation timed out
            break;
        default:
            message = "未知错误:" + errorCode; // unknown error
            break;
    }
    return message;
}
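
A typical call site, sketched here as an assumption rather than taken from o2oa: surfacing the translated message from a RecognitionListener callback.

// Hypothetical call site for ErrorTranslation.recogError(); not from the o2oa sources.
@Override
public void onError(int error) {
    String message = ErrorTranslation.recogError(error);
    Toast.makeText(getApplicationContext(), message, Toast.LENGTH_SHORT).show();
}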
 
Example 24 - Project: DroidSpeech - File: DroidSpeech.java
/**
 * Initializes the droid speech properties
 */
private void initDroidSpeechProperties()
{
    // Initializing the droid speech recognizer
    droidSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(context);

    // Initializing the speech intent
    speechIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    speechIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, context.getPackageName());
    speechIntent.putExtra(RecognizerIntent.EXTRA_PARTIAL_RESULTS, true);
    speechIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, Extensions.MAX_VOICE_RESULTS);
    if(dsProperties.currentSpeechLanguage != null)
    {
        // Setting the speech language
        speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, dsProperties.currentSpeechLanguage);
        speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE, dsProperties.currentSpeechLanguage);
    }

    if(dsProperties.offlineSpeechRecognition && Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
    {
        // Setting offline speech recognition to true
        speechIntent.putExtra(RecognizerIntent.EXTRA_PREFER_OFFLINE, true);
    }

    // Initializing the audio Manager
    audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
}
 
@Override
public void onPartialResults(final Bundle partialResults) {
    if (recognitionActive) {
        updateStopRunnable(1);
        final ArrayList<String> partialRecognitionResults = partialResults.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        if (partialRecognitionResults != null && !partialRecognitionResults.isEmpty()) {
            GoogleRecognitionServiceImpl.this.onPartialResults(partialRecognitionResults);
        }
    }
}
 
@Override
public void onPartialResults(Bundle bundle) {
  ArrayList<String> results = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
  if (results == null || results.isEmpty()) {
    result = "";
  } else {
    result = results.get(0);
  }
  speechListener.onPartialResult(result);
}
 
private int getErrorMessage(int errorCode) {
  int errCode = ErrorMessages.ERROR_DEFAULT;
  switch (errorCode) {
    case SpeechRecognizer.ERROR_AUDIO:
      errCode = ErrorMessages.ERROR_AUDIO;
      break;
    case SpeechRecognizer.ERROR_CLIENT:
      errCode = ErrorMessages.ERROR_CLIENT;
      break;
    case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
      errCode = ErrorMessages.ERROR_INSUFFICIENT_PERMISSIONS;
      break;
    case SpeechRecognizer.ERROR_NETWORK:
      errCode = ErrorMessages.ERROR_NETWORK;
      break;
    case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
      errCode = ErrorMessages.ERROR_NETWORK_TIMEOUT;
      break;
    case SpeechRecognizer.ERROR_NO_MATCH:
      errCode = ErrorMessages.ERROR_NO_MATCH;
      break;
    case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
      errCode = ErrorMessages.ERROR_RECOGNIZER_BUSY;
      break;
    case SpeechRecognizer.ERROR_SERVER:
      errCode = ErrorMessages.ERROR_SERVER;
      break;
    case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
      errCode = ErrorMessages.ERROR_SPEECH_TIMEOUT;
      break;
  }
  return errCode;
}
 
Example 28 - Project: android-chromium - File: SpeechRecognition.java
public static boolean initialize(Context context) {
    if (!SpeechRecognizer.isRecognitionAvailable(context))
        return false;

    PackageManager pm = context.getPackageManager();
    Intent intent = new Intent(RecognitionService.SERVICE_INTERFACE);
    final List<ResolveInfo> list = pm.queryIntentServices(intent, PackageManager.GET_SERVICES);

    for (ResolveInfo resolve : list) {
        ServiceInfo service = resolve.serviceInfo;

        if (!service.packageName.equals(PROVIDER_PACKAGE_NAME))
            continue;

        int versionCode;
        try {
            versionCode = pm.getPackageInfo(service.packageName, 0).versionCode;
        } catch (NameNotFoundException e) {
            continue;
        }

        if (versionCode < PROVIDER_MIN_VERSION)
            continue;

        mRecognitionProvider = new ComponentName(service.packageName, service.name);

        return true;
    }

    // If we reach this point, we failed to find a suitable recognition provider.
    return false;
}
 
Example 29 - Project: Saiy-PS - File: RecognitionMicrosoft.java
@Override
public void onPartialResponseReceived(final String partial) {
    if (DEBUG) {
        MyLog.i(CLS_NAME, "onPartialResponseReceived: " + partial);
    }

    partialArray.clear();
    partialBundle.clear();

    partialArray.add(partial);
    partialBundle.putStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION, partialArray);
    listener.onPartialResults(partialBundle);
}
 
@TargetApi(14)
@Override
public void onResults(final Bundle results) {
    if (recognitionActive) {
        final ArrayList<String> recognitionResults = results
                .getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);

        float[] rates = null;

        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
            rates = results.getFloatArray(SpeechRecognizer.CONFIDENCE_SCORES);
        }

        if (recognitionResults == null || recognitionResults.isEmpty()) {
            // empty response
            GoogleRecognitionServiceImpl.this.onResult(new AIResponse());
        } else {
            final AIRequest aiRequest = new AIRequest();
            if (rates != null) {
                aiRequest.setQuery(recognitionResults.toArray(new String[recognitionResults.size()]), rates);
            } else {
                aiRequest.setQuery(recognitionResults.get(0));
            }

            // notify listeners about the last recognition result for more accurate user feedback
            GoogleRecognitionServiceImpl.this.onPartialResults(recognitionResults);
            GoogleRecognitionServiceImpl.this.sendRequest(aiRequest, requestExtras);
        }
    }
    stopInternal();
}