android.media.AudioManager#AUDIO_SESSION_ID_GENERATE Source Code Examples

The examples below show how android.media.AudioManager#AUDIO_SESSION_ID_GENERATE is used in open-source projects; the full source files can be found in each project's repository on GitHub.
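
AudioManager.AUDIO_SESSION_ID_GENERATE tells the audio framework to allocate a fresh session ID when the AudioTrack is constructed, rather than the caller supplying one. The ID that was actually assigned can be read back afterwards, for example to attach audio effects to the same session. The minimal sketch below illustrates the pattern; the buffer size and format values are illustrative and not taken from any of the projects listed here.

// Let the framework pick the session ID, then read back the value it chose.
AudioTrack track = new AudioTrack(
        new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build(),
        new AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(44100)
                .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                .build(),
        8192,                                     // buffer size in bytes (illustrative)
        AudioTrack.MODE_STREAM,
        AudioManager.AUDIO_SESSION_ID_GENERATE);  // framework assigns a new session ID
int generatedId = track.getAudioSessionId();      // the ID that was actually generated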

Example 1  Project: Android-Guitar-Tuner   File: AndroidAudioPlayer.java
public AndroidAudioPlayer(final AudioConfig audioConfig) {
    AudioAttributes audioAttributes = new AudioAttributes.Builder()
            .setLegacyStreamType(AudioManager.STREAM_MUSIC)
            .setUsage(AudioAttributes.USAGE_MEDIA)
            .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
            .build();

    AudioFormat audioFormat = new AudioFormat.Builder()
            .setChannelMask(audioConfig.getOutputChannel())
            .setEncoding(audioConfig.getOutputFormat())
            .setSampleRate(audioConfig.getSampleRate())
            .build();

    audioTrack = new AudioTrack(audioAttributes,
            audioFormat,
            audioConfig.getOutputBufferSize(),
            AudioTrack.MODE_STATIC,
            AudioManager.AUDIO_SESSION_ID_GENERATE);

    outputByteCount = audioConfig.getOutputFormatByteCount();
}
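
Because the session ID is generated by the framework in the constructor above, its value is only known once the track exists. A hypothetical follow-up (not part of the Android-Guitar-Tuner code) could read it back and attach an effect to that session:

// Hypothetical continuation: attach an equalizer to the generated session.
int sessionId = audioTrack.getAudioSessionId();
Equalizer equalizer = new Equalizer(0 /* priority */, sessionId);  // android.media.audiofx.Equalizer
equalizer.setEnabled(true);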
 
Example 2  Project: webrtc_android   File: WebRtcAudioTrack.java
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the meantime.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(new AudioAttributes.Builder()
                            .setUsage(DEFAULT_USAGE)
                            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                            .build(),
      new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build(),
      bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 3  Project: TelePlus-Android   File: DefaultAudioSink.java
@TargetApi(21)
private AudioTrack createAudioTrackV21() {
  android.media.AudioAttributes attributes;
  if (tunneling) {
    attributes = new android.media.AudioAttributes.Builder()
        .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
        .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
        .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
        .build();
  } else {
    attributes = audioAttributes.getAudioAttributesV21();
  }
  AudioFormat format =
      new AudioFormat.Builder()
          .setChannelMask(outputChannelConfig)
          .setEncoding(outputEncoding)
          .setSampleRate(outputSampleRate)
          .build();
  int audioSessionId = this.audioSessionId != C.AUDIO_SESSION_ID_UNSET ? this.audioSessionId
      : AudioManager.AUDIO_SESSION_ID_GENERATE;
  return new AudioTrack(attributes, format, bufferSize, MODE_STREAM, audioSessionId);
}
 
Example 4  Project: MediaSDK   File: DefaultAudioSink.java
@TargetApi(21)
private AudioTrack createAudioTrackV21(
    boolean tunneling, AudioAttributes audioAttributes, int audioSessionId) {
  android.media.AudioAttributes attributes;
  if (tunneling) {
    attributes =
        new android.media.AudioAttributes.Builder()
            .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
            .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
            .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
            .build();
  } else {
    attributes = audioAttributes.getAudioAttributesV21();
  }
  AudioFormat format =
      new AudioFormat.Builder()
          .setChannelMask(outputChannelConfig)
          .setEncoding(outputEncoding)
          .setSampleRate(outputSampleRate)
          .build();
  return new AudioTrack(
      attributes,
      format,
      bufferSize,
      MODE_STREAM,
      audioSessionId != C.AUDIO_SESSION_ID_UNSET
          ? audioSessionId
          : AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 5  Project: webrtc_android   File: WebRtcAudioTrack.java
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the meantime.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  if (usageAttribute != DEFAULT_USAGE) {
    Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(
      new AudioAttributes.Builder()
          .setUsage(usageAttribute)
          .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
      .build(),
      new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(sampleRateInHz)
        .setChannelMask(channelConfig)
        .build(),
      bufferSizeInBytes,
      AudioTrack.MODE_STREAM,
      AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 6  Project: android_9.0.0_r45   File: TextToSpeechService.java
/** Create AudioOutputParams with default values */
AudioOutputParams() {
    mSessionId = AudioManager.AUDIO_SESSION_ID_GENERATE;
    mVolume = Engine.DEFAULT_VOLUME;
    mPan = Engine.DEFAULT_PAN;
    mAudioAttributes = null;
}
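
Here the TTS service falls back to AUDIO_SESSION_ID_GENERATE when the client does not request a particular session. A client that wants the synthesized speech tied to its own audio session can pass one in the speak() parameters; the sketch below assumes a TextToSpeech instance named tts and a session ID obtained elsewhere (Engine.KEY_PARAM_SESSION_ID is the standard key for this, available since API 21).

// Caller-side sketch: route synthesis into an existing audio session.
Bundle params = new Bundle();
params.putInt(TextToSpeech.Engine.KEY_PARAM_SESSION_ID, mySessionId);
tts.speak("Hello", TextToSpeech.QUEUE_ADD, params, "utterance-1");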
 
Example 7  Project: Telegram-FOSS   File: DefaultAudioSink.java
@TargetApi(21)
private AudioTrack createAudioTrackV21(
    boolean tunneling, AudioAttributes audioAttributes, int audioSessionId) {
  android.media.AudioAttributes attributes;
  if (tunneling) {
    attributes =
        new android.media.AudioAttributes.Builder()
            .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
            .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
            .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
            .build();
  } else {
    attributes = audioAttributes.getAudioAttributesV21();
  }
  AudioFormat format =
      new AudioFormat.Builder()
          .setChannelMask(outputChannelConfig)
          .setEncoding(outputEncoding)
          .setSampleRate(outputSampleRate)
          .build();
  return new AudioTrack(
      attributes,
      format,
      bufferSize,
      MODE_STREAM,
      audioSessionId != C.AUDIO_SESSION_ID_UNSET
          ? audioSessionId
          : AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 8  Project: Telegram   File: DefaultAudioSink.java
@TargetApi(21)
private AudioTrack createAudioTrackV21(
    boolean tunneling, AudioAttributes audioAttributes, int audioSessionId) {
  android.media.AudioAttributes attributes;
  if (tunneling) {
    attributes =
        new android.media.AudioAttributes.Builder()
            .setContentType(android.media.AudioAttributes.CONTENT_TYPE_MOVIE)
            .setFlags(android.media.AudioAttributes.FLAG_HW_AV_SYNC)
            .setUsage(android.media.AudioAttributes.USAGE_MEDIA)
            .build();
  } else {
    attributes = audioAttributes.getAudioAttributesV21();
  }
  AudioFormat format =
      new AudioFormat.Builder()
          .setChannelMask(outputChannelConfig)
          .setEncoding(outputEncoding)
          .setSampleRate(outputSampleRate)
          .build();
  return new AudioTrack(
      attributes,
      format,
      bufferSize,
      MODE_STREAM,
      audioSessionId != C.AUDIO_SESSION_ID_UNSET
          ? audioSessionId
          : AudioManager.AUDIO_SESSION_ID_GENERATE);
}