android.media.AudioFormat#ENCODING_PCM_16BIT Source Code Examples

Listed below are code examples that use android.media.AudioFormat#ENCODING_PCM_16BIT, collected from open-source projects; follow each project link to view the full source on GitHub.
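Before the project examples, a minimal sketch of the canonical pattern: ENCODING_PCM_16BIT is passed both to AudioRecord.getMinBufferSize() and to the AudioRecord constructor, so the two always agree. The 44.1 kHz mono configuration below is an illustrative assumption, not taken from any of the projects.

// Minimal capture sketch (assumes the RECORD_AUDIO permission has been granted)
int sampleRate = 44100;
int minBuf = AudioRecord.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);
record.startRecording();
short[] pcm = new short[minBuf / 2];   // 16-bit PCM: one sample = 2 bytes
int read = record.read(pcm, 0, pcm.length);
record.stop();
record.release();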

Example 1

/**
 * @param filePath path to the video MP4 file.
 * @param bitRate AAC bitrate in kb.
 * @return true on success, false on error (normally because the selected encoder
 * doesn't support the requested configuration, or the device has no AAC encoder).
 * @throws IOException normally when the file is not found.
 */
public boolean prepareAudio(String filePath, int bitRate) throws IOException {
  audioPath = filePath;
  audioDecoder = new AudioDecoder(this, audioDecoderInterface, this);
  if (!audioDecoder.initExtractor(filePath)) return false;
  boolean result = audioEncoder.prepareAudioEncoder(bitRate, audioDecoder.getSampleRate(),
      audioDecoder.isStereo(), 0);
  prepareAudioRtp(audioDecoder.isStereo(), audioDecoder.getSampleRate());
  audioDecoder.prepareAudio();
  if (glInterface != null && !(glInterface instanceof OffScreenGlThread)) {
    int channel =
        audioDecoder.isStereo() ? AudioFormat.CHANNEL_OUT_STEREO : AudioFormat.CHANNEL_OUT_MONO;
    int buffSize = AudioTrack.getMinBufferSize(audioDecoder.getSampleRate(), channel,
        AudioFormat.ENCODING_PCM_16BIT);
    audioTrackPlayer =
        new AudioTrack(AudioManager.STREAM_MUSIC, audioDecoder.getSampleRate(), channel,
            AudioFormat.ENCODING_PCM_16BIT, buffSize, AudioTrack.MODE_STREAM);
  }
  return result;
}
 
Example 2  Project: EvilsLive  File: AudioCapture.java
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    int numOfChannels, bitsPerSample;
    if (channelConfig == AudioFormat.CHANNEL_IN_MONO) {
        numOfChannels = 1;
    } else {
        numOfChannels = 2;
    }
    if (AudioFormat.ENCODING_PCM_16BIT == audioFormat) {
        bitsPerSample = 16;
    } else {
        bitsPerSample = 8;
    }
    // The number of frames per second equals the sample rate, so a period of
    // TIMER_INTERVAL milliseconds contains sampleRate * TIMER_INTERVAL / 1000 frames.
    int periodInFrames = sampleRate * TIMER_INTERVAL / 1000;
    // Refer to android/4.1.1/frameworks/av/media/libmedia/AudioRecord.cpp,
    // AudioRecord::getMinFrameCount; multiplied by 2 for ping-pong use of the record buffer.
    mMinBufferSize = periodInFrames * 2 * numOfChannels * bitsPerSample / 8;
    if (mMinBufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)) {
        // Make sure the buffer is not smaller than the smallest allowed size
        mMinBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        // Set frame period and timer interval accordingly
//        periodInFrames = mMinBufferSize / (2 * bitsPerSample * numOfChannels / 8);
    }

    return mMinBufferSize;
}
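To make the formula concrete, a worked example under assumed values (44.1 kHz, stereo, 16-bit; the 120 ms TIMER_INTERVAL is an assumption, since the constant's value is not shown above):

// periodInFrames = 44100 * 120 / 1000 = 5292 frames
// minBufferSize  = 5292 * 2 (ping-pong) * 2 channels * 16 bits / 8 = 42336 bytes
int periodInFrames = 44100 * 120 / 1000;              // 5292
int minBufferSize = periodInFrames * 2 * 2 * 16 / 8;  // 42336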
 
Example 3  Project: ssj  File: Microphone.java
public static int audioFormatSampleBytes(int f)
{
    switch (f)
    {
        case AudioFormat.ENCODING_PCM_8BIT:
            return 1;
        case AudioFormat.ENCODING_PCM_16BIT:
        case AudioFormat.ENCODING_DEFAULT:
            return 2;
        case AudioFormat.ENCODING_PCM_FLOAT:
            return 4;
        case AudioFormat.ENCODING_INVALID:
        default:
            return 0;
    }
}
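One way to use this helper is to convert a raw byte count into a sample count; the 4096-byte buffer below is illustrative:

// Hypothetical usage: how many samples fit in a 4096-byte 16-bit PCM buffer?
int sampleBytes = Microphone.audioFormatSampleBytes(AudioFormat.ENCODING_PCM_16BIT); // 2
int numSamples = 4096 / sampleBytes;  // 2048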
 
Example 4  Project: AlexaAndroid  File: SpeechRecord.java
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes)
        throws IllegalArgumentException {

    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            false,
            false,
            false
    );
}
 
Example 5  Project: VideoAndroid  File: Recorder.java
@Override
public void run() {
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

    // Audio
    int bufferSize;
    ShortBuffer audioData;
    int bufferReadResult;

    bufferSize = AudioRecord.getMinBufferSize(sampleAudioRateInHz,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleAudioRateInHz,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);

    audioData = ShortBuffer.allocate(bufferSize);

    mAudioRecord.startRecording();

    /* ffmpeg_audio encoding loop */
    while (mRunAudioThread) {
        // Read audio data; read() returns the number of shorts read, or a negative error code
        bufferReadResult = mAudioRecord.read(audioData.array(), 0, audioData.capacity());
        if (bufferReadResult > 0) {
            audioData.limit(bufferReadResult);
            if (mFFmpegFrameRecorder != null && mRecording) {
                try {
                    mFFmpegFrameRecorder.recordSamples(audioData);  // write audio samples
                } catch (FFmpegFrameRecorder.Exception e) {
                    e.printStackTrace();
                }
            }
        }
    }

    /* encoding finished, release the recorder */
    if (mAudioRecord != null) {
        mAudioRecord.stop();
        mAudioRecord.release();
    }
}
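One subtlety in this example: getMinBufferSize() returns a size in bytes, while ShortBuffer.allocate() takes a count of 16-bit shorts, so the buffer above actually holds twice the minimum (harmless, but worth knowing). A size-exact allocation would be:

// getMinBufferSize() is in bytes; one 16-bit sample occupies 2 bytes
ShortBuffer audioData = ShortBuffer.allocate(bufferSize / 2);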
 
Example 6  Project: speechutils  File: SpeechRecord.java
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes)
        throws IllegalArgumentException {

    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            false,
            false,
            false
    );
}
 
Example 7  Project: cythara  File: AndroidAudioPlayer.java
/**
 * Constructs a new AndroidAudioPlayer from an audio format, default buffer size and stream type.
 *
 * @param audioFormat The audio format of the stream that this AndroidAudioPlayer will process.
 *                    This can only be 1 channel, PCM 16 bit.
 * @param bufferSizeInSamples  The requested buffer size in samples.
 * @param streamType  The type of audio stream that the internal AudioTrack should use. For
 *                    example, {@link AudioManager#STREAM_MUSIC}.
 * @throws IllegalArgumentException if audioFormat is not valid or if the requested buffer size is invalid.
 * @see AudioTrack
 */
public AndroidAudioPlayer(TarsosDSPAudioFormat audioFormat, int bufferSizeInSamples, int streamType) {
    if (audioFormat.getChannels() != 1) {
        throw new IllegalArgumentException("TarsosDSP only supports mono audio channel count: " + audioFormat.getChannels());
    }

    // The requested sample rate
    int sampleRate = (int) audioFormat.getSampleRate();

    // The buffer size in bytes is twice the buffer size expressed in samples if 16-bit samples are used:
    int bufferSizeInBytes = bufferSizeInSamples * audioFormat.getSampleSizeInBits() / 8;

    // From the Android API about getMinBufferSize():
    // The total size (in bytes) of the internal buffer where audio data is read from for playback.
    // If the track's creation mode is MODE_STREAM, you can write data into this buffer in chunks less than or equal to this size,
    // and it is typical to use chunks of 1/2 of the total size to permit double-buffering. If the track's creation mode is MODE_STATIC,
    // this is the maximum length sample, or audio clip, that can be played by this instance. See getMinBufferSize(int, int, int) to determine
    // the minimum required buffer size for the successful creation of an AudioTrack instance in streaming mode. Using values smaller
    // than getMinBufferSize() will result in an initialization failure.
    int minBufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSizeInBytes > bufferSizeInBytes) {
        throw new IllegalArgumentException("The buffer size should be at least " + (minBufferSizeInBytes / (audioFormat.getSampleSizeInBits() / 8)) + " (samples) according to AudioTrack.getMinBufferSize().");
    }
    }

    //http://developer.android.com/reference/android/media/AudioTrack.html#AudioTrack(int, int, int, int, int, int)
    audioTrack = new AudioTrack(streamType, sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes,AudioTrack.MODE_STREAM);

    audioTrack.play();
}
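A possible usage sketch, assuming TarsosDSP's TarsosDSPAudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian) constructor; the 44.1 kHz rate and 2048-sample buffer are illustrative:

// Mono, signed, little-endian 16-bit PCM at 44.1 kHz
TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(44100, 16, 1, true, false);
AndroidAudioPlayer player = new AndroidAudioPlayer(format, 2048, AudioManager.STREAM_MUSIC);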
 
Example 8  Project: CameraV  File: AudioRecorderActivity.java
private void initAudio(final String audioPath) throws Exception {

    fileAudio = new File(audioPath);

    outputStreamAudio = new BufferedOutputStream(
            new info.guardianproject.iocipher.FileOutputStream(fileAudio), 8192 * 8);

    if (useAAC) {
        aac = new AACHelper();
        aac.setEncoder(MediaConstants.sAudioSampleRate, MediaConstants.sAudioChannels,
                MediaConstants.sAudioBitRate);
    } else {
        int minBufferSize = AudioRecord.getMinBufferSize(MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigIn,
                AudioFormat.ENCODING_PCM_16BIT) * 8;

        audioData = new byte[minBufferSize];

        int audioSource = MediaRecorder.AudioSource.CAMCORDER;
        // audioSource = MediaRecorder.AudioSource.MIC;

        audioRecord = new AudioRecord(audioSource,
                MediaConstants.sAudioSampleRate,
                MediaConstants.sChannelConfigIn,
                AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize);
    }
}
 
Example 9  Project: AlexaAndroid  File: SpeechRecord.java
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes, boolean noise, boolean gain, boolean echo)
        throws IllegalArgumentException {

    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            noise,
            gain,
            echo
    );
}
 
Example 10  Project: TikTok  File: AudioRecorder.java
@Override
public void run() {
    try {
        // Initialize audio
        int bufferSizeInBytes = AudioRecord
                .getMinBufferSize(audioSampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        final AudioRecord
                audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, audioSampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
        audioRecord.startRecording();

        /**
         * Judge recording permission by whether recording actually started
         */
        if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING
                && audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_STOPPED) {
//            AVLogUtils.e(TAG, "audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING : " + audioRecord.getRecordingState());
            isAudioPermission = false;
        }

        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            // If checks are run in rapid succession, the previous AudioRecord may not be fully
            // released yet and this check returns RECORDSTATE_STOPPED; a subsequent read() would
            // return a size of 0. Return true or false here depending on your own requirements.
            isAudioPermission = false;
        }

        if (!isAudioPermission) {
            mOnAudioRecorderListener.onNotPermission();
            return;
        }
        mOnAudioRecorderListener.onCanRecord(isAudioPermission);

        byte[] data = new byte[2048];
        while (isRecord) {
            int offset = 0;
            while (offset < 2048) {
                int readSize = audioRecord.read(data, offset, data.length - offset);
                if (readSize < 0) break;  // stop on a read error instead of looping forever
                offset += readSize;
            }
            if (isAudioRecordWrite) {  // write to file
                HeyhouRecorder.getInstance().recordAudioNHW(data, audioSampleRate, HeyhouRecorder.FORMAT_S16, 1024);
            }
        }
        audioRecord.stop();
        audioRecord.release();
    } catch (Exception e) {
        e.printStackTrace();
        mOnAudioRecorderListener.onRecordError("Recording failed");
    }
}
 
Example 11  Project: Saiy-PS  File: AudioParameters.java
public static AudioParameters getDefaultBeyondVerbal(){
    return new AudioParameters(AudioFormat.ENCODING_PCM_16BIT,
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            AudioFormat.CHANNEL_IN_MONO, 1, 8000, 16);
}
 
Example 12  Project: WeiXinRecordedDemo  File: RecordUtil.java
private void initAudioRecord(){
    audioBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT, audioBufferSize);
}
 
Example 13  Project: Android-Audio-Recorder  File: RawSamples.java
public static long getSamples(long len) {
    return len / (AUDIO_FORMAT == AudioFormat.ENCODING_PCM_16BIT ? 2 : 1);
}
 
Example 14  Project: webrtc_android  File: WebRtcAudioTrack.java
@SuppressWarnings("deprecation") // Deprecated in API level 25.
private static AudioTrack createAudioTrackOnLowerThanLollipop(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  return new AudioTrack(AudioManager.STREAM_VOICE_CALL, sampleRateInHz, channelConfig,
      AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
}
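For comparison, a sketch of the non-deprecated construction path available since API 21; the AudioAttributes choices below approximate STREAM_VOICE_CALL and are an assumption, not this project's code:

private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  return new AudioTrack(
      new AudioAttributes.Builder()
          .setUsage(AudioAttributes.USAGE_VOICE_COMMUNICATION)
          .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
          .build(),
      new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build(),
      bufferSizeInBytes, AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}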
 
Example 15  Project: CameraView  File: CheckPermission.java
/**
 * Checks whether the app has permission to record audio.
 *
 * @return one of the STATE_* recording-state constants
 */
public static int getRecordState() {
    int minBuffer = AudioRecord.getMinBufferSize(44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat
            .ENCODING_PCM_16BIT);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, 44100, AudioFormat
            .CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, (minBuffer * 100));
    short[] point = new short[minBuffer];
    int readSize = 0;
    try {
        audioRecord.startRecording();  // check whether recording can be started
    } catch (Exception e) {
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }
        return STATE_NO_PERMISSION;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // Devices below Android 6.0 always return this state, so check the build version first
        // Check whether recording is already in progress
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
            audioRecord = null;
            Log.d("CheckAudioPermission", "the recorder is occupied");
        }
        return STATE_RECORDING;
    } else {
        // Check whether any recorded data can be read
        readSize = audioRecord.read(point, 0, point.length);
        if (readSize <= 0) {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            Log.d("CheckAudioPermission", "the recording result is empty");
            return STATE_NO_PERMISSION;
        } else {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            return STATE_SUCCESS;
        }
    }
}
 
Example 16  Project: android_9.0.0_r45  File: FileSynthesisCallback.java
@Override
public int start(int sampleRateInHz, int audioFormat, int channelCount) {
    if (DBG) {
        Log.d(TAG, "FileSynthesisRequest.start(" + sampleRateInHz + "," + audioFormat
                + "," + channelCount + ")");
    }
    if (audioFormat != AudioFormat.ENCODING_PCM_8BIT &&
        audioFormat != AudioFormat.ENCODING_PCM_16BIT &&
        audioFormat != AudioFormat.ENCODING_PCM_FLOAT) {
        Log.e(TAG, "Audio format encoding " + audioFormat + " not supported. Please use one " +
                   "of AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT or " +
                   "AudioFormat.ENCODING_PCM_FLOAT");
    }
    mDispatcher.dispatchOnBeginSynthesis(sampleRateInHz, audioFormat, channelCount);

    FileChannel fileChannel = null;
    synchronized (mStateLock) {
        if (mStatusCode == TextToSpeech.STOPPED) {
            if (DBG) Log.d(TAG, "Request has been aborted.");
            return errorCodeOnStop();
        }
        if (mStatusCode != TextToSpeech.SUCCESS) {
            if (DBG) Log.d(TAG, "Error was raised");
            return TextToSpeech.ERROR;
        }
        if (mStarted) {
            Log.e(TAG, "Start called twice");
            return TextToSpeech.ERROR;
        }
        mStarted = true;
        mSampleRateInHz = sampleRateInHz;
        mAudioFormat = audioFormat;
        mChannelCount = channelCount;

        mDispatcher.dispatchOnStart();
        fileChannel = mFileChannel;
    }

    try {
        fileChannel.write(ByteBuffer.allocate(WAV_HEADER_LENGTH));
        return TextToSpeech.SUCCESS;
    } catch (IOException ex) {
        Log.e(TAG, "Failed to write wav header to output file descriptor", ex);
        synchronized (mStateLock) {
            cleanUp();
            mStatusCode = TextToSpeech.ERROR_OUTPUT;
        }
        return TextToSpeech.ERROR;
    }
}
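The callback above reserves WAV_HEADER_LENGTH bytes up front and fills the header in once the PCM length is known. For reference, a sketch of what a standard 44-byte PCM WAV header contains (generic RIFF layout, not the AOSP implementation):

// Standard 44-byte RIFF/WAVE header for 16-bit PCM (all fields little-endian)
static ByteBuffer makeWavHeader(int sampleRate, short channels, int dataLength) {
    short bitsPerSample = 16;                 // matches ENCODING_PCM_16BIT
    short blockAlign = (short) (channels * bitsPerSample / 8);
    ByteBuffer b = ByteBuffer.allocate(44).order(ByteOrder.LITTLE_ENDIAN);
    b.put("RIFF".getBytes(StandardCharsets.US_ASCII));
    b.putInt(36 + dataLength);                // remaining chunk size
    b.put("WAVE".getBytes(StandardCharsets.US_ASCII));
    b.put("fmt ".getBytes(StandardCharsets.US_ASCII));
    b.putInt(16);                             // fmt sub-chunk size
    b.putShort((short) 1);                    // format 1 = linear PCM
    b.putShort(channels);
    b.putInt(sampleRate);
    b.putInt(sampleRate * blockAlign);        // byte rate
    b.putShort(blockAlign);
    b.putShort(bitsPerSample);
    b.put("data".getBytes(StandardCharsets.US_ASCII));
    b.putInt(dataLength);
    b.flip();
    return b;
}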
 
Example 17  Project: DeviceConnect-Android  File: MicOpusRecorder.java
/**
 * Records audio and passes it to the MediaCodec.
 */
private void recordAudio() throws NativeInterfaceException {
    int samplingRate = mSamplingRate.getValue();
    int channels = mChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO;
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    int bufferSize = AudioRecord.getMinBufferSize(samplingRate, channels, audioFormat) * 4;
    int oneFrameDataCount = mSamplingRate.getValue() / mFrameSize.getFps();

    mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT,
            samplingRate,
            channels,
            audioFormat,
            bufferSize);

    if (mAudioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        if (mAudioRecordCallback != null) {
            mAudioRecordCallback.onEncoderError();
        }
        return;
    }

    if (mUseAEC && AcousticEchoCanceler.isAvailable()) {
        // Acoustic echo canceler
        mEchoCanceler = AcousticEchoCanceler.create(mAudioRecord.getAudioSessionId());
        if (mEchoCanceler != null) {
            int ret = mEchoCanceler.setEnabled(true);
            if (ret != AudioEffect.SUCCESS) {
                if (DEBUG) {
                    Log.w(TAG, "AcousticEchoCanceler is not supported.");
                }
            }
        }
    }

    OpusEncoder opusEncoder = null;

    try {
        opusEncoder = new OpusEncoder(mSamplingRate, mChannels, mFrameSize, mBitRate, mApplication);

        mAudioRecord.startRecording();

        short[] emptyBuffer = new short[oneFrameDataCount];
        short[] pcmBuffer = new short[oneFrameDataCount];
        byte[] opusFrameBuffer = opusEncoder.bufferAllocate();
        while (!mStopFlag) {
            int readSize = mAudioRecord.read(pcmBuffer, 0, oneFrameDataCount);
            if (readSize > 0) {
                int opusFrameBufferLength;
                if (isMute()) {
                    opusFrameBufferLength = opusEncoder.encode(emptyBuffer, readSize, opusFrameBuffer);
                } else {
                    opusFrameBufferLength = opusEncoder.encode(pcmBuffer, readSize, opusFrameBuffer);
                }

                if (opusFrameBufferLength > 0 && mAudioRecordCallback != null) {
                    mAudioRecordCallback.onPeriodicNotification(opusFrameBuffer, opusFrameBufferLength);
                }
            } else if (readSize == AudioRecord.ERROR_INVALID_OPERATION) {
                if (DEBUG) {
                    Log.e(TAG, "Invalid operation error.");
                }
                break;
            } else if (readSize == AudioRecord.ERROR_BAD_VALUE) {
                if (DEBUG) {
                    Log.e(TAG, "Bad value error.");
                }
                break;
            } else if (readSize == AudioRecord.ERROR) {
                if (DEBUG) {
                    Log.e(TAG, "Unknown error.");
                }
                break;
            }
        }
    } finally {
        if (mEchoCanceler != null) {
            mEchoCanceler.release();
            mEchoCanceler = null;
        }

        if (opusEncoder != null) {
            opusEncoder.release();
        }
    }
}
 
Example 18  Project: imsdk-android  File: CheckPermission.java
/**
 * Checks whether the app has permission to record audio.
 *
 * @return one of the STATE_* recording-state constants
 */
public static int getRecordState() {
    int minBuffer = AudioRecord.getMinBufferSize(44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat
            .ENCODING_PCM_16BIT);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, 44100, AudioFormat
            .CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, (minBuffer * 100));
    short[] point = new short[minBuffer];
    int readSize = 0;
    try {
        audioRecord.startRecording();  // check whether recording can be started
    } catch (Exception e) {
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }
        return STATE_NO_PERMISSION;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // Devices below Android 6.0 always return this state, so check the build version first
        // Check whether recording is already in progress
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
            audioRecord = null;
            Log.d("CheckAudioPermission", "the recorder is occupied");
        }
        return STATE_RECORDING;
    } else {
        // Check whether any recorded data can be read
        readSize = audioRecord.read(point, 0, point.length);
        if (readSize <= 0) {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            Log.d("CheckAudioPermission", "the recording result is empty");
            return STATE_NO_PERMISSION;
        } else {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            return STATE_SUCCESS;
        }
    }
}
 
Example 19  Project: TikTok  File: CheckPermissionUtil.java
/**
 * Checks whether the app has permission to record audio.
 */
public static boolean isHasAudioPermission(final Context context) {
    int bufferSizeInBytes = 0;
    bufferSizeInBytes = AudioRecord.getMinBufferSize(44100,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, 44100,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes);
    // Start recording audio
    try {
        // Guard against crashes on some phones, e.g. Lenovo
        audioRecord.startRecording();
    } catch (IllegalStateException e) {
        e.printStackTrace();
//        AVLogUtils.e(TAG, Log.getStackTraceString(e));
    }
    /**
     * Judge recording permission by whether recording actually started
     */
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING
            && audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_STOPPED) {
//        AVLogUtils.e(TAG, "audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING : " + audioRecord.getRecordingState());
        return false;
    }

    if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
        // If checks are run in rapid succession, the previous AudioRecord may not be fully
        // released yet and this check returns RECORDSTATE_STOPPED; a subsequent read() would
        // return a size of 0. Return true or false here depending on your own requirements.
        return false;
    }

    byte[] bytes = new byte[1024];
    int readSize = audioRecord.read(bytes, 0, 1024);
    if (readSize == AudioRecord.ERROR_INVALID_OPERATION || readSize <= 0) {
//        AVLogUtils.e(TAG, "readSize illegal : " + readSize);
        return false;
    }
    audioRecord.stop();
    audioRecord.release();
    audioRecord = null;

    return true;
}
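On Android 6.0 (API 23) and above, the probe above can be replaced with a direct runtime-permission check; a minimal sketch using the standard androidx API:

// Direct check via androidx.core; no AudioRecord needs to be created
public static boolean hasRecordAudioPermission(Context context) {
    return ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO)
            == PackageManager.PERMISSION_GRANTED;
}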
 
Example 20  Project: MediaPlayer-Extended  File: AudioPlayback.java
/**
 * Initializes or reinitializes the audio track with the supplied format for playback
 * while keeping the playstate. Keeps the current configuration and skips reinitialization
 * if the new format is the same as the current format.
 */
public void init(MediaFormat format) {
    Log.d(TAG, "init");

    boolean playing = false;

    if(isInitialized()) {
        if(!checkIfReinitializationRequired(format)) {
            // Set new format that equals the old one (in case we compare references somewhere)
            mAudioFormat = format;
            return;
        }

        playing = isPlaying();
        pause();
        stopAndRelease(false);
    } else {
        // deferred creation of the audio thread until its first use
        mAudioThread = new AudioThread();
        mAudioThread.setPaused(true);
        mAudioThread.start();
    }

    mAudioFormat = format;

    int channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
    int bytesPerSample = 2;
    mFrameSize = bytesPerSample * channelCount;
    mSampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);

    int channelConfig = AudioFormat.CHANNEL_OUT_DEFAULT;
    switch(channelCount) {
        case 1:
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;
            break;
        case 2:
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
            break;
        case 4:
            channelConfig = AudioFormat.CHANNEL_OUT_QUAD;
            break;
        case 6:
            channelConfig = AudioFormat.CHANNEL_OUT_5POINT1;
            break;
        case 8:
            channelConfig = AudioFormat.CHANNEL_OUT_7POINT1;
    }

    mPlaybackBufferSize = mFrameChunkSize * channelCount;

    mAudioTrack = new AudioTrack(
            mAudioStreamType,
            mSampleRate,
            channelConfig,
            AudioFormat.ENCODING_PCM_16BIT,
            mPlaybackBufferSize, // at least twice the size to enable double buffering (according to docs)
            AudioTrack.MODE_STREAM, mAudioSessionId);

    if(mAudioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
        stopAndRelease();
        throw new IllegalStateException("audio track init failed");
    }

    mAudioSessionId = mAudioTrack.getAudioSessionId();
    mAudioStreamType = mAudioTrack.getStreamType();
    setStereoVolume(mVolumeLeft, mVolumeRight);
    mPresentationTimeOffsetUs = PTS_NOT_SET;

    if(playing) {
        play();
    }
}