android.media.AudioRecord class: source code examples

The examples below show how the android.media.AudioRecord API class is used in real projects; you can also follow the links to view the full source code on GitHub.
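
Most of the examples share the same AudioRecord lifecycle: query the minimum buffer size, construct the recorder, start recording, read PCM data in a loop, then stop and release. The sketch below summarizes that pattern; the sample rate, audio source, and buffer handling are illustrative assumptions rather than code taken from any of the projects listed.

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public class SimplePcmCapture {
    // Assumed values for illustration only.
    private static final int SAMPLE_RATE = 44100;
    private volatile boolean running = true;

    public void capture() {
        // 1. Ask the framework for the smallest legal buffer for this configuration.
        int minBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
            return; // configuration not supported on this device
        }

        // 2. Create the recorder and verify it initialized (requires the RECORD_AUDIO permission).
        AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBufferSize * 2);
        if (record.getState() != AudioRecord.STATE_INITIALIZED) {
            record.release();
            return;
        }

        // 3. Start recording and read PCM frames until asked to stop.
        record.startRecording();
        byte[] buffer = new byte[minBufferSize];
        while (running) {
            int read = record.read(buffer, 0, buffer.length);
            if (read > 0) {
                // hand buffer[0..read) to an encoder, file writer, listener, ...
            } else if (read < 0) {
                break; // ERROR_INVALID_OPERATION, ERROR_BAD_VALUE, ...
            }
        }

        // 4. Always stop and release so the microphone is freed for other apps.
        if (record.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            record.stop();
        }
        record.release();
    }
}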

Example 1  Project: Tok-Android  File: OpusAudioRecorder.java
public void stopRecording(final boolean send, boolean vibrate) {
    recordQueue.cancelRunnable(recordStartRunnable);
    if (vibrate) {
        vibrate(new long[] { 0L, 10L });
    }
    recordQueue.postRunnable(new Runnable() {
        @Override
        public void run() {
            if (audioRecord != null) {
                try {
                    sendAfterDone = send;
                    if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
                        audioRecord.stop();
                    }
                } catch (Exception e) {
                    if (recordingAudioFile != null) {
                        recordingAudioFile.delete();
                    }
                }
                OpusAudioRecorder.this.stopRecordingInternal(send);
            }
        }
    }, 0);
}
 
Example 2  Project: Telegram  File: AudioRecordJNI.java
public boolean start() {
	if(audioRecord==null || audioRecord.getState()!=AudioRecord.STATE_INITIALIZED)
		return false;
	try{
		if(thread==null){
			if(audioRecord==null)
				return false;
			audioRecord.startRecording();
			startThread();
		}else{
			audioRecord.startRecording();
		}
		return true;
	}catch(Exception x){
		VLog.e("Error initializing AudioRecord", x);
	}
	return false;
}
 
Example 3  Project: doppler-android  File: Doppler.java
public Doppler() {
    //write a check to see if stereo is supported
    bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    buffer = new short[bufferSize];

    frequency = PRELIM_FREQ;
    freqIndex = PRELIM_FREQ_INDEX;

    frequencyPlayer = new FrequencyPlayer(PRELIM_FREQ);

    microphone = new AudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION, DEFAULT_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);

    mHandler = new Handler();

    calibrator = new Calibrator();
}
 
Example 4  Project: pocketsphinx-android  File: SpeechRecognizer.java
/**
 * Creates speech recognizer. Recognizer holds the AudioRecord object, so you 
 * need to call {@link release} in order to properly finalize it.
 * 
 * @param config The configuration object
 * @throws IOException thrown if audio recorder can not be created for some reason.
 */
protected SpeechRecognizer(Config config) throws IOException {
    decoder = new Decoder(config);
    sampleRate = (int)decoder.getConfig().getFloat("-samprate");
    bufferSize = Math.round(sampleRate * BUFFER_SIZE_SECONDS);
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION, sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);

    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        throw new IOException(
                "Failed to initialize recorder. Microphone might be already in use.");
    }
}
 
Example 5  Project: Lassi-Android  File: AudioMediaEncoder.java
private void read(boolean endOfStream) {
    mCurrentBuffer = mByteBufferPool.get();
    if (mCurrentBuffer == null) {
        LOG.e("Skipping audio frame, encoding is too slow.");
        // TODO should fix the next presentation time here. However this is
        // extremely unlikely based on my tests. The mByteBufferPool should be big enough.
    } else {
        mCurrentBuffer.clear();
        mReadBytes = mAudioRecord.read(mCurrentBuffer, FRAME_SIZE);
        if (mReadBytes > 0) { // Good read: increase PTS.
            increaseTime(mReadBytes);
            mCurrentBuffer.limit(mReadBytes);
            onBuffer(endOfStream);
        } else if (mReadBytes == AudioRecord.ERROR_INVALID_OPERATION) {
            LOG.e("Got AudioRecord.ERROR_INVALID_OPERATION");
        } else if (mReadBytes == AudioRecord.ERROR_BAD_VALUE) {
            LOG.e("Got AudioRecord.ERROR_BAD_VALUE");
        }
    }
}
 
Example 6  Project: cythara  File: AudioDispatcherFactory.java
/**
 * Create a new AudioDispatcher connected to the default microphone.
 * 
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * 
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
		final int audioBufferSize, final int bufferOverlap) {
	int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
			android.media.AudioFormat.CHANNEL_IN_MONO,
			android.media.AudioFormat.ENCODING_PCM_16BIT);
	int minAudioBufferSizeInSamples =  minAudioBufferSize/2;
	if (minAudioBufferSizeInSamples <= audioBufferSize) {
		AudioRecord audioInputStream = new AudioRecord(
				MediaRecorder.AudioSource.MIC, sampleRate,
				android.media.AudioFormat.CHANNEL_IN_MONO,
				android.media.AudioFormat.ENCODING_PCM_16BIT,
				audioBufferSize * 2);

		TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);

		TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
		// Start recording! Opens the stream.
		audioInputStream.startRecording();
		return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
	} else {
		throw new IllegalArgumentException("Buffer size too small should be at least " + (minAudioBufferSize * 2));
	}
}
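
As a usage note for the factory above: in TarsosDSP, AudioDispatcher implements Runnable, so the returned dispatcher normally runs on its own thread. A hedged sketch with assumed parameter values:

// Hedged usage sketch; the sample rate and buffer sizes are assumptions, not cythara's values.
AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(22050, 2048, 0);
new Thread(dispatcher, "Audio Dispatcher").start();
// ...later, dispatcher.stop() ends the processing loop and closes the underlying stream.
dispatcher.stop();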
 
Example 7  Project: Android  File: AudioCapturer.java
public void stopCapture() {

    if (!mIsCaptureStarted) {
        return;
    }

    mIsLoopExit = true;
    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRecord.stop();
    }

    mAudioRecord.release();

    mIsCaptureStarted = false;
    mAudioFrameCapturedListener = null;

    Log.d(TAG, "Stop audio capture success !");
}
 
Example 8  Project: PermissionAgent  File: RecordAudioTester.java
@Override
public boolean test() throws Throwable {
    AudioRecord audioRecord = findAudioRecord();
    try {
        if (audioRecord != null) {
            audioRecord.startRecording();
        } else {
            return !existMicrophone(mContext);
        }
    } catch (Throwable e) {
        return !existMicrophone(mContext);
    } finally {
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
        }
    }
    return true;
}
 
Example 10  Project: EvilsLive  File: AudioCapture.java
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[mMinBufferSize];

        int ret = mAudioRecord.read(buffer, 0, mMinBufferSize);

        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mAudioFrameCapturedListener != null) {
                mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
            Log.d(TAG, "OK, Captured " + ret + " bytes !");
        }
        SystemClock.sleep(10);
    }

}
 
Example 11  Project: RtmpPublisher  File: AudioRecorder.java
public void start() {
    final int bufferSize =
            AudioRecord.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_IN_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);

    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);

    audioRecord.startRecording();

    HandlerThread handlerThread = new HandlerThread("AudioRecorder-record");
    handlerThread.start();
    Handler handler = new Handler(handlerThread.getLooper());
    handler.post(new Runnable() {
        @Override
        public void run() {
            int bufferReadResult;
            byte[] data = new byte[bufferSize];
            // keep running... so use a different thread.
            while (isRecording() && (bufferReadResult = audioRecord.read(data, 0, bufferSize)) > 0) {
                listener.onAudioRecorded(data, bufferReadResult);
            }
        }
    });
}
 
Example 12  Project: AndroidInstantVideo  File: AACEncoder.java
public AACEncoder(final StreamPublisher.StreamPublisherParam params) throws IOException {
    this.samplingRate = params.samplingRate;

    bufferSize = params.audioBufferSize;
    mMediaCodec = MediaCodec.createEncoderByType(params.audioMIME);
    mMediaCodec.configure(params.createAudioMediaFormat(), null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mediaCodecInputStream = new MediaCodecInputStream(mMediaCodec, new MediaCodecInputStream.MediaFormatCallback() {
        @Override
        public void onChangeMediaFormat(MediaFormat mediaFormat) {
            params.setAudioOutputMediaFormat(mediaFormat);
        }
    });
    mAudioRecord = new AudioRecord(params.audioSource, samplingRate, params.channelCfg, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
    if (NoiseSuppressor.isAvailable()) {
        NoiseSuppressor noiseSuppressor = NoiseSuppressor.create(mAudioRecord.getAudioSessionId());
    }

}
 
Example 13  Project: PLDroidRTCStreaming  File: ExtAudioCapture.java
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[SAMPLES_PER_FRAME * 2];
        int ret = mAudioRecord.read(buffer, 0, buffer.length);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mOnAudioFrameCapturedListener != null) {
                mOnAudioFrameCapturedListener.onAudioFrameCaptured(buffer, System.nanoTime());
            }
        }
    }
}
 
Example 14  Project: Jumble  File: AudioInput.java
@Override
public void run() {
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

    Log.i(Constants.TAG, "AudioInput: started");

    mAudioRecord.startRecording();

    if(mAudioRecord.getState() != AudioRecord.STATE_INITIALIZED)
        return;

    final short[] mAudioBuffer = new short[mFrameSize];
    // We loop when the 'recording' instance var is true instead of checking audio record state because we want to always cleanly shutdown.
    while(mRecording) {
        int shortsRead = mAudioRecord.read(mAudioBuffer, 0, mFrameSize);
        if(shortsRead > 0) {
            mListener.onAudioInputReceived(mAudioBuffer, mFrameSize);
        } else {
            Log.e(Constants.TAG, "Error fetching audio! AudioRecord error " + shortsRead);
        }
    }

    mAudioRecord.stop();

    Log.i(Constants.TAG, "AudioInput: stopped");
}
 
Example 15

public AudioRecord findAudioRecord() {
	for (int rate : AudioBuffer.POSSIBLE_SAMPLE_RATES) {
		for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT }) {
			for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO }) {
				try {
					Log.d(TAG, "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
							+ channelConfig);
					int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

					if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
						// check if we can instantiate and have a success
						AudioRecord recorder = new AudioRecord(AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);

						if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
							return recorder;
						}
					}
				} catch (Exception e) {
					Log.e(TAG, rate + "Exception, keep trying.",e);
				}
			}
		}
	}
	return null;
}
 
Example 16  Project: OmRecorder  File: PullableSource.java
@Override
public AudioRecord preparedToBePulled() {
  if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
    if (android.media.audiofx.AutomaticGainControl.isAvailable()) {
      android.media.audiofx.AutomaticGainControl automaticGainControl = android.media.audiofx.AutomaticGainControl
          .create(audioRecord().getAudioSessionId());
      if (automaticGainControl != null) {
        automaticGainControl.setEnabled(true);
        Log.i(getClass().getSimpleName(), "AutomaticGainControl ON");
      } else {
        Log.i(getClass().getSimpleName(), "AutomaticGainControl failed :(");
      }
    } else {
      Log.i(getClass().getSimpleName(), "This device don't support AutomaticGainControl");
    }
  } else {
    Log.i(getClass().getSimpleName(),
        "For this effect, Android api should be higher than or equals 16");
  }
  return super.preparedToBePulled();
}
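
The same check-then-create pattern works for the other capture-side effects in android.media.audiofx. Below is a hedged sketch (not part of OmRecorder) that attaches a NoiseSuppressor to the same audio session, using the audioRecord() accessor from the method above:

// Hedged sketch following the pattern above; NoiseSuppressor also requires API 16+.
if (android.media.audiofx.NoiseSuppressor.isAvailable()) {
  android.media.audiofx.NoiseSuppressor noiseSuppressor =
      android.media.audiofx.NoiseSuppressor.create(audioRecord().getAudioSessionId());
  if (noiseSuppressor != null) {
    noiseSuppressor.setEnabled(true);
    Log.i(getClass().getSimpleName(), "NoiseSuppressor ON");
  }
}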
 
Example 17  Project: NoiseCapture  File: AudioProcess.java
private AudioRecord createAudioRecord() {
    // Source:
    //  section 5.3 of the Android 4.0 Compatibility Definition
    // https://source.android.com/compatibility/4.0/android-4.0-cdd.pdf
    // Using VOICE_RECOGNITION
    // Noise reduction processing, if present, is disabled.
    // Except on 5.0+, where android.media.audiofx.NoiseSuppressor can be used to cancel such processing
    // Automatic gain control, if present, is disabled.
    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
        return new AudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION,
                rate, audioChannel,
                encoding, bufferSize);
    } else {
        return null;
    }
}
 
Example 18  Project: TikTok  File: AudioTrackManager.java
/**
 * Stop playback
 */
public void stopPlay() {
    try {
        destroyThread();
        if (audioTrack != null) {
            if (audioTrack.getState() == AudioRecord.STATE_INITIALIZED) {
                audioTrack.stop();
            }
            if (audioTrack != null) {
                audioTrack.release();
            }
        }
        if (dis != null) {
            dis.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example 19  Project: Android  File: AudioCapturer.java
@Override
public void run() {

    while (!mIsLoopExit) {

        byte[] buffer = new byte[mMinBufferSize];

        int ret = mAudioRecord.read(buffer, 0, mMinBufferSize);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mAudioFrameCapturedListener != null) {
                mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
            Log.d(TAG, "OK, Captured " + ret + " bytes !");
        }

        SystemClock.sleep(10);
    }
}
 
Example 20  Project: EvilsLive  File: AudioCapture.java
public void stopCapture() {
    if (!mIsCaptureStarted) {
        return;
    }

    mIsLoopExit = true;

    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRecord.stop();
    }

    mAudioRecord.release();
    mIsCaptureStarted = false;
    mAudioFrameCapturedListener = null;

    Log.d(TAG, "Stop audio capture success !");

}
 
Example 21  Project: EvilsLive  File: AudioCapture.java
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    int numOfChannels, bitsPersample;
    if (channelConfig == AudioFormat.CHANNEL_IN_MONO) {
        numOfChannels = 1;
    } else {
        numOfChannels = 2;
    }
    if (AudioFormat.ENCODING_PCM_16BIT == audioFormat) {
        bitsPersample = 16;
    } else {
        bitsPersample = 8;
    }
    int periodInFrames = sampleRate * TIMER_INTERVAL / 1000;   // frames per period; the number of frames per second equals the sample rate
    // refer to android/4.1.1/frameworks/av/media/libmedia/AudioRecord.cpp, AudioRecord::getMinFrameCount method
    // multiplied by 2 for ping-pong use of the record buffer
    mMinBufferSize = periodInFrames * 2 * numOfChannels * bitsPersample / 8;
    if (mMinBufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)) {
        // Make sure the buffer size is not smaller than the smallest allowed one
        mMinBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        // Set frame period and timer interval accordingly
//        periodInFrames = mMinBufferSize / (2 * bitsPersample * numOfChannels / 8);
    }

    return mMinBufferSize;
}
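
As a worked example of the formula above, with assumed values rather than the project's constants: for a 44100 Hz, mono, 16-bit stream and a hypothetical TIMER_INTERVAL of 120 ms,

// Illustrative arithmetic only; TIMER_INTERVAL = 120 ms is an assumed value.
int periodInFrames = 44100 * 120 / 1000;      // 5292 frames per period
int size = periodInFrames * 2 * 1 * 16 / 8;   // x2 ping-pong, 1 channel, 2 bytes per sample = 21168 bytes
// If 21168 is below AudioRecord.getMinBufferSize(44100, CHANNEL_IN_MONO, ENCODING_PCM_16BIT),
// the framework minimum is used instead.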
 
Example 22

@Override
public void run() {
    ByteBuffer audioData = ByteBuffer.allocateDirect(SAMPLE_BLOCK_SIZE);
    if (mAudioInputDevice != null) {
        mAudioRecord.setPreferredDevice(mAudioInputDevice);
    }
    int result =
            mAudioRecord.read(audioData, audioData.capacity(), AudioRecord.READ_BLOCKING);
    if (result < 0) {
        Log.e(TAG, "error reading from audio stream:" + result);
        return;
    }
    Log.d(TAG, "streaming ConverseRequest: " + result);
    mAssistantRequestObserver.onNext(AssistRequest.newBuilder()
            .setAudioIn(ByteString.copyFrom(audioData))
            .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}
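
Note that setPreferredDevice() and the ByteBuffer read() overload with AudioRecord.READ_BLOCKING both require API level 23, so a version guard is a reasonable precaution. A hedged sketch, not part of the original snippet:

// Hedged sketch: setPreferredDevice() and READ_BLOCKING need Android 6.0 (API 23) or newer.
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
    if (mAudioInputDevice != null) {
        mAudioRecord.setPreferredDevice(mAudioInputDevice);
    }
    int result = mAudioRecord.read(audioData, audioData.capacity(), AudioRecord.READ_BLOCKING);
    // ... handle result as above ...
} else {
    // fall back to the byte[] / short[] read() overloads available on older API levels
}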
 
Example 23  Project: PLDroidRTCStreaming  File: ExtAudioCapture.java
public void stopCapture() {
    if (!mIsCaptureStarted) {
        return;
    }

    mIsLoopExit = true;
    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRecord.stop();
    }

    mAudioRecord.release();

    mIsCaptureStarted = false;
    mOnAudioFrameCapturedListener = null;

    Log.d(TAG, "Stop audio capture success !");
}
 
Example 24  Project: Plumble  File: Preferences.java
private static void configureAudioPreferences(final PreferenceScreen screen) {
    ListPreference inputPreference = (ListPreference) screen.findPreference(Settings.PREF_INPUT_METHOD);
    inputPreference.setOnPreferenceChangeListener(new Preference.OnPreferenceChangeListener() {
        @Override
        public boolean onPreferenceChange(Preference preference, Object newValue) {
            updateAudioDependents(screen, (String) newValue);
            return true;
        }
    });

    // Scan each bitrate and determine if the device supports it
    ListPreference inputQualityPreference = (ListPreference) screen.findPreference(Settings.PREF_INPUT_RATE);
    String[] bitrateNames = new String[inputQualityPreference.getEntryValues().length];
    for(int x=0;x<bitrateNames.length;x++) {
        int bitrate = Integer.parseInt(inputQualityPreference.getEntryValues()[x].toString());
        boolean supported = AudioRecord.getMinBufferSize(bitrate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT) > 0;
        bitrateNames[x] = bitrate+"Hz" + (supported ? "" : " (unsupported)");
    }
    inputQualityPreference.setEntries(bitrateNames);

    updateAudioDependents(screen, inputPreference.getValue());
}
 
Example 25

private void init() {
    synchronized (recognizerLock) {
        final int bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE_IN_HZ, CHANNEL_CONFIG, AUDIO_FORMAT);

        audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                SAMPLE_RATE_IN_HZ,
                CHANNEL_CONFIG,
                AUDIO_FORMAT,
                bufferSize);

        vad.setEnabled(config.isVoiceActivityDetectionEnabled());
        vad.setSpeechListener(this);

        mediaPlayer = new MediaPlayer();
        mediaPlayer.setOnErrorListener(this);
        mediaPlayer.setOnCompletionListener(this);
    }
}
 
Example 26  Project: permissions4m  File: AudioRecordManager.java
/**
 * stop record
 *
 * @throws IOException
 * @throws InterruptedException
 */
public void stopRecord() throws IOException, InterruptedException {
    // specially for OPPO、XIAOMI、MEIZU、HUAWEI and so on
    Thread.sleep(250);
    destroyThread();
    if (mRecorder != null) {
        if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
            mRecorder.stop();
        }
        if (mRecorder != null) {
            mRecorder.release();
        }
    }
    if (dos != null) {
        dos.flush();
        dos.close();
    }
    length = file.length();
    deleteFile();
}
 
Example 27  Project: snips-platform-android-demo  File: MainActivity.java
private void runStreaming() {
    Log.d(TAG, "starting audio streaming");
    final int minBufferSizeInBytes = AudioRecord.getMinBufferSize(FREQUENCY, CHANNEL, ENCODING);
    Log.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);

    recorder = new AudioRecord(MIC, FREQUENCY, CHANNEL, ENCODING, minBufferSizeInBytes);
    recorder.startRecording();

    while (continueStreaming) {
        short[] buffer = new short[minBufferSizeInBytes / 2];
        recorder.read(buffer, 0, buffer.length);
        if (client != null) {
            client.sendAudioBuffer(buffer);
        }
    }
    recorder.stop();
    Log.d(TAG, "audio streaming stopped");
}
 
Example 28  Project: Tok-Android  File: OpusAudioRecorder.java
private void init() {
    recordBufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
        AudioFormat.ENCODING_PCM_16BIT);
    try {
        PhoneStateListener phoneStateListener = new PhoneStateListener() {

            @Override
            public void onCallStateChanged(int state, String incomingNumber) {
                super.onCallStateChanged(state, incomingNumber);
                if (state != TelephonyManager.CALL_STATE_IDLE) {
                    stopRecording(false, false);
                    if (callBack != null) {
                        callBack.onCancel();
                    }
                }
            }
        };
        context.getSystemService(TelephonyManager.class)
            .listen(phoneStateListener, PhoneStateListener.LISTEN_CALL_STATE);
        recordQueue = new DispatchQueue("recordQueue");
        recordQueue.setPriority(Thread.MAX_PRIORITY);
        fileEncodingQueue = new DispatchQueue("fileEncodingQueue");
        fileEncodingQueue.setPriority(Thread.MAX_PRIORITY);
        recordStartRunnable = new RecordStartRunnable();
        recordRunnable = new RecordRunnable();
    } catch (Exception ignore) {
        ignore.printStackTrace();
    }
}
 
Example 29  Project: droidkit-webrtc  File: WebRtcAudioRecord.java
@SuppressWarnings("unused")
private int StopRecording() {
    _recLock.lock();
    try {
        // only stop if we are recording
        if (_audioRecord.getRecordingState() ==
          AudioRecord.RECORDSTATE_RECORDING) {
            // stop recording
            try {
                _audioRecord.stop();
            } catch (IllegalStateException e) {
                e.printStackTrace();
                return -1;
            }
        }

        // release the object
        _audioRecord.release();
        _audioRecord = null;

    } finally {
        // Ensure we always unlock, both for success, exception or error
        // return.
        _doRecInit = true;
        _recLock.unlock();
    }

    _isRecording = false;
    return 0;
}
 
Example 30  Project: mollyim-android  File: AudioCodec.java
public AudioCodec() throws IOException {
  this.bufferSize  = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
  this.audioRecord = createAudioRecord(this.bufferSize);
  this.mediaCodec  = createMediaCodec(this.bufferSize);

  this.mediaCodec.start();

  try {
    audioRecord.startRecording();
  } catch (Exception e) {
    Log.w(TAG, e);
    mediaCodec.release();
    throw new IOException(e);
  }
}
 