android.media.AudioTrack Class: Source Code Examples

Listed below are examples showing how the android.media.AudioTrack API class is used in real projects; you can also follow the project links to view the full source code on GitHub.
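
Before the project samples, here is a minimal sketch of the typical AudioTrack streaming lifecycle that most of the examples below follow (the 44100 Hz mono PCM parameters are illustrative assumptions, not taken from any one project):

int minBufferSize = AudioTrack.getMinBufferSize(
        44100, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
// getMinBufferSize returns AudioTrack.ERROR_BAD_VALUE for unsupported parameters.
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
        minBufferSize, AudioTrack.MODE_STREAM);
byte[] pcm = new byte[minBufferSize];  // 16-bit little-endian PCM samples
track.play();                          // start draining the buffer
track.write(pcm, 0, pcm.length);       // blocks in MODE_STREAM until queued
track.stop();
track.release();                       // always release the native resources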

Example 1   Project: CameraV   File: MjpegViewerActivity.java
public void initAudio(String vfsPath) throws Exception {

    isAudio = new BufferedInputStream(new FileInputStream(vfsPath));

    if (useAAC)
    {
        aac = new AACHelper();
        aac.setDecoder(MediaConstants.sAudioSampleRate, MediaConstants.sAudioChannels, MediaConstants.sAudioBitRate);
    }
    else
    {
        int minBufferSize = AudioTrack.getMinBufferSize(MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigOut, AudioFormat.ENCODING_PCM_16BIT) * 8;

        at = new AudioTrack(AudioManager.STREAM_MUSIC, MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigOut, AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize, AudioTrack.MODE_STREAM);
    }
}
 
Example 2   Project: science-journal   File: AndroidAudioForJSyn.java
@Override
public void start() {
  minBufferSize =
      AudioTrack.getMinBufferSize(
          frameRate, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
  System.out.println("Audio minBufferSize = " + minBufferSize);
  bufferSize = (3 * (minBufferSize / 2)) & ~3;
  System.out.println("Audio bufferSize = " + bufferSize);
  audioTrack =
      new AudioTrack(
          AudioManager.STREAM_MUSIC,
          frameRate,
          AudioFormat.CHANNEL_OUT_STEREO,
          AudioFormat.ENCODING_PCM_16BIT,
          bufferSize,
          AudioTrack.MODE_STREAM);
  audioTrack.play();
}
 
Example 3   Project: AssistantBySDK   File: PcmPlayer.java
@Override
public void onMarkerReached(AudioTrack track) {
    Log.i(TAG, "onMarkerReached>>>" + track.getNotificationMarkerPosition());
    if (playLock.tryLock()) {
        try {
            playCondition.signalAll();
        } finally {
            playLock.unlock();
        }
    }
    Log.i(TAG, "PCM SIZE=" + pcms.size());
    if (!pending.get() && pcms.size() == 0) {
        play.set(false);
        playListener.onCompleted();
    }
}
 
Example 4   Project: speech-android-sdk   File: TTSUtility.java
private void initPlayer(){
    stopTtsPlayer();
    // IMPORTANT: minimum required buffer size for the successful creation of an AudioTrack instance in streaming mode.
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);

    synchronized (this) {
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRate,
                AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                bufferSize,
                AudioTrack.MODE_STREAM);
        if (audioTrack != null)
            audioTrack.play();
    }
}
 
Example 5   Project: Telegram-FOSS   File: DefaultAudioSink.java
private int getDefaultBufferSize() {
  if (isInputPcm) {
    int minBufferSize =
        AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
    Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
    int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
    int minAppBufferSize =
        (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
    int maxAppBufferSize =
        (int)
            Math.max(
                minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
    return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
  } else {
    int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
    if (outputEncoding == C.ENCODING_AC3) {
      rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
    }
    return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
  }
}
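
For reference, the conversions above follow the standard PCM relationships; a hedged sketch of the arithmetic (the helper below is illustrative, with names assumed rather than copied from the project):

// frames = microseconds * sampleRate / 1,000,000
private long durationUsToFrames(long durationUs) {
    return (durationUs * outputSampleRate) / 1_000_000L;
}
// One PCM frame spans bytesPerSample * channelCount bytes, so for
// 16-bit stereo output outputPcmFrameSize = 2 * 2 = 4 bytes.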
 
Example 6   Project: android-chromium   File: MediaCodecBridge.java
@CalledByNative
private boolean configureAudio(MediaFormat format, MediaCrypto crypto, int flags,
        boolean playAudio) {
    try {
        mMediaCodec.configure(format, null, crypto, flags);
        if (playAudio) {
            int sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
            int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
            int channelConfig = (channelCount == 1) ? AudioFormat.CHANNEL_OUT_MONO :
                    AudioFormat.CHANNEL_OUT_STEREO;
            // Using 16bit PCM for output. Keep this value in sync with
            // kBytesPerAudioOutputSample in media_codec_bridge.cc.
            int minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig,
                    AudioFormat.ENCODING_PCM_16BIT);
            mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, channelConfig,
                    AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
        }
        return true;
    } catch (IllegalStateException e) {
        Log.e(TAG, "Cannot configure the audio codec " + e.toString());
    }
    return false;
}
 
Example 7   Project: MediaSDK   File: AudioTrackPositionTracker.java
/**
 * Sets the {@link AudioTrack} to wrap. Subsequent method calls on this instance relate to this
 * track's position, until the next call to {@link #reset()}.
 *
 * @param audioTrack The audio track to wrap.
 * @param outputEncoding The encoding of the audio track.
 * @param outputPcmFrameSize For PCM output encodings, the frame size. The value is ignored
 *     otherwise.
 * @param bufferSize The audio track buffer size in bytes.
 */
public void setAudioTrack(
    AudioTrack audioTrack,
    @C.Encoding int outputEncoding,
    int outputPcmFrameSize,
    int bufferSize) {
  this.audioTrack = audioTrack;
  this.outputPcmFrameSize = outputPcmFrameSize;
  this.bufferSize = bufferSize;
  audioTimestampPoller = new AudioTimestampPoller(audioTrack);
  outputSampleRate = audioTrack.getSampleRate();
  needsPassthroughWorkarounds = needsPassthroughWorkarounds(outputEncoding);
  isOutputPcm = Util.isEncodingLinearPcm(outputEncoding);
  bufferSizeUs = isOutputPcm ? framesToDurationUs(bufferSize / outputPcmFrameSize) : C.TIME_UNSET;
  lastRawPlaybackHeadPosition = 0;
  rawPlaybackHeadWrapCount = 0;
  passthroughWorkaroundPauseOffset = 0;
  hasData = false;
  stopTimestampUs = C.TIME_UNSET;
  forceResetWorkaroundTimeMs = C.TIME_UNSET;
  latencyUs = 0;
}
 
Example 8   Project: Android   File: AudioPlayer.java
public boolean startPlayer(int streamType, int sampleRateInHz, int channelConfig, int audioFormat) {
    
    if (mIsPlayStarted) {
        Log.e(TAG, "Player already started !");
        return false;
    }
    
    mMinBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz,channelConfig,audioFormat);
    if (mMinBufferSize == AudioTrack.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }
    Log.d(TAG , "getMinBufferSize = "+mMinBufferSize+" bytes !");
    
    mAudioTrack = new AudioTrack(streamType,sampleRateInHz,channelConfig,audioFormat,mMinBufferSize,DEFAULT_PLAY_MODE);
    if (mAudioTrack.getState() == AudioTrack.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioTrack initialize fail !");
        return false;
    }            
    
    mIsPlayStarted = true;
    
    Log.d(TAG, "Start audio player success !");
    
    return true;
}
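
A hypothetical call site for the player above (the stream type and format values are illustrative assumptions, not taken from the project):

AudioPlayer player = new AudioPlayer();
boolean started = player.startPlayer(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
// startPlayer returns false if getMinBufferSize rejects the parameters
// or if the AudioTrack fails to initialize.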
 
Example 9
/**
 * Stop stream started with @startStream.
 */
public void stopStream() {
  if (streaming) {
    streaming = false;
    stopStreamRtp();
  }
  if (!recordController.isRecording()) {
    if (glInterface != null) {
      glInterface.removeMediaCodecSurface();
      glInterface.stop();
    }
    if (videoDecoder != null) videoDecoder.stop();
    if (audioDecoder != null) audioDecoder.stop();
    if (audioTrackPlayer != null
        && audioTrackPlayer.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
      audioTrackPlayer.stop();
    }
    audioTrackPlayer = null;
    videoEncoder.stop();
    audioEncoder.stop();
    recordController.resetFormats();
  }
}
 
Example 10   Project: webrtc_android   File: WebRtcAudioTrack.java
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the mean time.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(new AudioAttributes.Builder()
                            .setUsage(DEFAULT_USAGE)
                            .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                            .build(),
      new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build(),
      bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 11   Project: android_9.0.0_r45   File: BlockingAudioTrack.java
private static int writeToAudioTrack(AudioTrack audioTrack, byte[] bytes) {
    if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
        if (DBG) Log.d(TAG, "AudioTrack not playing, restarting : " + audioTrack.hashCode());
        audioTrack.play();
    }

    int count = 0;
    while (count < bytes.length) {
        // Note that we don't take bufferCopy.mOffset into account because
        // it is guaranteed to be 0.
        int written = audioTrack.write(bytes, count, bytes.length);
        if (written <= 0) {
            break;
        }
        count += written;
    }
    return count;
}
 
Example 12   Project: android_9.0.0_r45   File: BlockingAudioTrack.java
private static void setupVolume(AudioTrack audioTrack, float volume, float pan) {
    final float vol = clip(volume, 0.0f, 1.0f);
    final float panning = clip(pan, -1.0f, 1.0f);

    float volLeft = vol;
    float volRight = vol;
    if (panning > 0.0f) {
        volLeft *= (1.0f - panning);
    } else if (panning < 0.0f) {
        volRight *= (1.0f + panning);
    }
    if (DBG) Log.d(TAG, "volLeft=" + volLeft + ",volRight=" + volRight);
    if (audioTrack.setStereoVolume(volLeft, volRight) != AudioTrack.SUCCESS) {
        Log.e(TAG, "Failed to set volume");
    }
}
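
AudioTrack#setStereoVolume is deprecated as of API 21 in favor of the single-gain setVolume(float); a minimal hedged sketch of a version-aware variant (not part of the AOSP file above):

if (Build.VERSION.SDK_INT >= 21) {
    // One master gain, 0.0f..1.0f; per-channel panning cannot be expressed here.
    audioTrack.setVolume(vol);
} else {
    audioTrack.setStereoVolume(volLeft, volRight);
}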
 
Example 13   Project: DeviceConnect-Android   File: OpusTrack.java
/**
 * Creates an AudioTrack with the specified sampling rate and channel count.
 */
private void createAudioTrack() {
    int bufSize = AudioTrack.getMinBufferSize(mSamplingRate,
            mChannel == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT) * 2;

    if (DEBUG) {
        Log.d(TAG, "OpusTrack::createAudioTrack");
        Log.d(TAG, "  SamplingRate: " + mSamplingRate);
        Log.d(TAG, "  Channels: " + mChannel);
        Log.d(TAG, "  AudioFormat: " + AudioFormat.ENCODING_PCM_16BIT);
        Log.d(TAG, "  BufSize: " + bufSize);
    }

    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            mSamplingRate,
            mChannel == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT, bufSize,
            AudioTrack.MODE_STREAM);

    mAudioTrack.play();
}
 
Example 15
@Override
public boolean stopRenderer() {
    Log.d("AUDIO_FOCUS", "Stop Renderer");

    rendererLock.lock();
    try {
        // only stop if we are playing
        if (audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
            // stop playout
            audioTrack.stop();
        }
        // flush the buffers
        audioTrack.flush();
    } catch (Exception e) {
        throw new RuntimeException(e.getMessage());
    } finally {
        // Ensure we always unlock, both for success, exception or error
        // return.
        isRendering = false;
        rendererLock.unlock();
    }
    unregisterHeadsetReceiver();
    unregisterBtReceiver();
    return true;
}
 
Example 16   Project: RxAndroidAudio   File: StreamAudioPlayer.java
@WorkerThread
public synchronized boolean play(byte[] data, int size) {
    if (mAudioTrack != null) {
        try {
            int ret = mAudioTrack.write(data, 0, size);
            switch (ret) {
                case AudioTrack.ERROR_INVALID_OPERATION:
                    Log.w(TAG, "play fail: ERROR_INVALID_OPERATION");
                    return false;
                case AudioTrack.ERROR_BAD_VALUE:
                    Log.w(TAG, "play fail: ERROR_BAD_VALUE");
                    return false;
                case AudioManager.ERROR_DEAD_OBJECT:
                    Log.w(TAG, "play fail: ERROR_DEAD_OBJECT");
                    return false;
                default:
                    return true;
            }
        } catch (IllegalStateException e) {
            Log.w(TAG, "play fail: " + e.getMessage());
            return false;
        }
    }
    Log.w(TAG, "play fail: null mAudioTrack");
    return false;
}
 
Example 17   Project: CameraV   File: MjpegPlayerActivity.java
public void initAudio(String vfsPath) throws Exception {

    isAudio = new BufferedInputStream(new FileInputStream(vfsPath));

    if (useAAC)
    {
        aac = new AACHelper();
        aac.setDecoder(MediaConstants.sAudioSampleRate, MediaConstants.sAudioChannels, MediaConstants.sAudioBitRate);
    }
    else
    {
        int minBufferSize = AudioTrack.getMinBufferSize(MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigOut, AudioFormat.ENCODING_PCM_16BIT) * 8;

        at = new AudioTrack(AudioManager.STREAM_MUSIC, MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigOut, AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize, AudioTrack.MODE_STREAM);
    }
}
 
Example 18   Project: Viewer   File: AudioThread.java
public AudioThread(int sampleRateInHz, int channel, long streamId, long decoderId, Media media)
{
	if (channel == 1)
	{
		channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
	} else
	{
		channel_configuration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
	}
	this.mediaStreamId = streamId;
	this.decoderId = decoderId;
	this.media = media;
	int minBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz, channel_configuration, AudioFormat.ENCODING_PCM_16BIT);
	if (minBufferSize > audioLength)
	{
		audioLength = minBufferSize;
	}
	mAudioBuffer = new byte[audioLength];
	mAudio = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channel_configuration, AudioFormat.ENCODING_PCM_16BIT, audioLength, AudioTrack.MODE_STREAM);
}
 
Example 19   Project: apollo-DuerOS   File: AudioTrackManagerSingle.java
public void pauseAudioTrack() {
    if ((mAudioTrack != null) && (mAudioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING)) {
        try {
            mAudioTrack.pause();
        } catch (IllegalStateException e) {
            e.printStackTrace();
        }

        // release audio track focus
        releaseAudioTrackFocus();
    }
}
 
Example 20
@Override
public boolean onInitRenderer() {
    int bytesPerFrame = getRendererFormat().getChannelCount() * (BITS_PER_SAMPLE / 8);
    readByteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (getRendererFormat().getSampleRate() / BUFFERS_PER_SECOND));
    int channelConfig = channelCountToConfiguration(getRendererFormat().getChannelCount());
    int minBufferSize = AudioRecord.getMinBufferSize(getRendererFormat().getSampleRate(), channelConfig, android.media.AudioFormat.ENCODING_PCM_16BIT); // queries AudioRecord's minimum, then reuses it for the playback AudioTrack below
    audioTrack = new AudioTrack(AudioManager.STREAM_VOICE_CALL, getRendererFormat().getSampleRate(), channelConfig,
            android.media.AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
    keepAliveRendererRunnable = true;
    return true;
}
 
Example 21
private void reInitAudioTrack() {
    try {
        mMusicAudioTrack =
                new AudioTrack(AudioManager.STREAM_MUSIC, mPreMediaSampleRate, mPreMediaChannelConfig,
                        mPreMediaFormate, mPreMinBuffSize, AudioTrack.MODE_STREAM);

    } catch (IllegalArgumentException e) {
        informMusicPause();
        mMusicAudioTrack = null;
        e.printStackTrace();
    }
}
 
Example 22   Project: android-chromium   File: MediaCodecBridge.java
@CalledByNative
private void playOutputBuffer(byte[] buf) {
    if (mAudioTrack != null) {
        if (AudioTrack.PLAYSTATE_PLAYING != mAudioTrack.getPlayState()) {
            mAudioTrack.play();
        }
        int size = mAudioTrack.write(buf, 0, buf.length);
        if (buf.length != size) {
            Log.i(TAG, "Failed to send all data to audio output, expected size: " +
                    buf.length + ", actual size: " + size);
        }
    }
}
 
Example 23   Project: MediaSDK   File: DefaultAudioSink.java
private static AudioTrack initializeKeepSessionIdAudioTrack(int audioSessionId) {
  int sampleRate = 4000; // Equal to private AudioTrack.MIN_SAMPLE_RATE.
  int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
  @C.PcmEncoding int encoding = C.ENCODING_PCM_16BIT;
  int bufferSize = 2; // Use a two byte buffer, as it is not actually used for playback.
  return new AudioTrack(C.STREAM_TYPE_DEFAULT, sampleRate, channelConfig, encoding, bufferSize,
      MODE_STATIC, audioSessionId);
}
 
Example 24   Project: NetEasyNews   File: MediaPlayer.java
private void audioTrackRelease() {
  if (mAudioTrack != null) {
    if (mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
      mAudioTrack.stop();
    mAudioTrack.release();
  }
  mAudioTrack = null;
}
 
Example 25   Project: Telegram   File: AudioTimestampPoller.java
/**
 * Creates a new audio timestamp poller.
 *
 * @param audioTrack The audio track that will provide timestamps, if the platform supports it.
 */
public AudioTimestampPoller(AudioTrack audioTrack) {
  if (Util.SDK_INT >= 19) {
    audioTimestamp = new AudioTimestampV19(audioTrack);
    reset();
  } else {
    audioTimestamp = null;
    updateState(STATE_NO_TIMESTAMP);
  }
}
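
For context, a hedged sketch of the API-19+ call that the AudioTimestampV19 wrapper above is presumably built around:

AudioTimestamp timestamp = new AudioTimestamp();
if (audioTrack.getTimestamp(timestamp)) {
    long frames = timestamp.framePosition; // frames presented so far
    long whenNanos = timestamp.nanoTime;   // CLOCK_MONOTONIC time, nanoseconds
}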
 
Example 26   Project: video-player   File: MediaPlayer.java
private void audioTrackRelease() {
  if (mAudioTrack != null) {
    if (mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
      mAudioTrack.stop();
    mAudioTrack.release();
  }
  mAudioTrack = null;
}
 
Example 27   Project: jsyn   File: AndroidAudioForJSyn.java
public void write(double[] buffer, int start, int count) {
    // Allocate buffer if needed.
    if ((floatBuffer == null) || (floatBuffer.length < count)) {
        floatBuffer = new float[count];
    }
    // Convert double samples to floats.
    for (int i = 0; i < count; i++) {
        floatBuffer[i] = (float) buffer[i + start];
    }
    audioTrack.write(floatBuffer, 0, count, AudioTrack.WRITE_BLOCKING);
}
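
The float-array overload of write used above requires API 21+ and a track configured for float PCM; a hypothetical companion setup (parameters assumed, not from the jsyn source):

int floatMinSize = AudioTrack.getMinBufferSize(44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
AudioTrack floatTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT,
        floatMinSize, AudioTrack.MODE_STREAM);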
 
Example 28   Project: webrtc_android   File: WebRtcAudioTrack.java
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the mean time.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  if (usageAttribute != DEFAULT_USAGE) {
    Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(
      new AudioAttributes.Builder()
          .setUsage(usageAttribute)
          .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
      .build(),
      new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(sampleRateInHz)
        .setChannelMask(channelConfig)
        .build(),
      bufferSizeInBytes,
      AudioTrack.MODE_STREAM,
      AudioManager.AUDIO_SESSION_ID_GENERATE);
}
 
Example 29   Project: Vitamio   File: MediaPlayer.java
private void audioTrackRelease() {
  if (mAudioTrack != null) {
    if (mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
      mAudioTrack.stop();
    mAudioTrack.release();
  }
  mAudioTrack = null;
}
 
Example 30   Project: droidkit-webrtc   File: WebRtcAudioTrack.java
@SuppressWarnings("unused")
private int StopPlayback() {
    _playLock.lock();
    try {
        // only stop if we are playing
        if (_audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING) {
            // stop playout
            try {
                _audioTrack.stop();
            } catch (IllegalStateException e) {
                e.printStackTrace();
                return -1;
            }

            // flush the buffers
            _audioTrack.flush();
        }

        // release the object
        _audioTrack.release();
        _audioTrack = null;

    } finally {
        // Ensure we always unlock, both for success, exception or error
        // return.
        _doPlayInit = true;
        _playLock.unlock();
    }

    _isPlaying = false;
    return 0;
}
 