android.media.MediaRecorder.AudioSource 类源码实例 Demo

下面列出了 android.media.MediaRecorder.AudioSource 类的 API 实例代码与用法示例,也可以点击链接到 GitHub 查看完整源代码。

源代码1 项目: semitone   文件: RecordEngine.java
/**
 * One-time initialization of the shared recording engine.
 *
 * <p>Does nothing if already created. Requires the RECORD_AUDIO runtime
 * permission; without it the engine stays uninitialized and this method
 * returns silently.
 *
 * @param a activity used as the context for the permission check
 */
public static void create(Activity a) {
    if (created) {
        return;
    }

    // Only proceed once the RECORD_AUDIO permission has been granted.
    final int permission =
            ContextCompat.checkSelfPermission(a, Manifest.permission.RECORD_AUDIO);
    created = permission == PackageManager.PERMISSION_GRANTED;
    if (!created) {
        return;
    }

    bufsize = AudioRecord.getMinBufferSize(
            SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    ar = new AudioRecord(
            AudioSource.MIC,
            SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufsize);

    DSP.init(bufsize);
    resume();
}
 
/**
 * Probes sample-rate / encoding / channel combinations until an AudioRecord
 * can actually be initialized on this device.
 *
 * <p>Fix: candidates that fail to reach {@code STATE_INITIALIZED} are now
 * released instead of leaking their native resources (AudioRecord holds a
 * native capture handle until {@code release()} is called).
 *
 * @return an initialized AudioRecord, or {@code null} if no combination worked
 */
public AudioRecord findAudioRecord() {
	for (int rate : AudioBuffer.POSSIBLE_SAMPLE_RATES) {
		for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT }) {
			for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO }) {
				try {
					Log.d(TAG, "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
							+ channelConfig);
					int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

					if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
						// check if we can instantiate and have a success
						AudioRecord recorder = new AudioRecord(AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);

						if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
							return recorder;
						}
						// Failed candidate: free its native resources before trying the next combo.
						recorder.release();
					}
				} catch (Exception e) {
					Log.e(TAG, rate + "Exception, keep trying.",e);
				}
			}
		}
	}
	return null;
}
 
源代码3 项目: pocketsphinx-android   文件: SpeechRecognizer.java
/**
 * Creates a speech recognizer. The recognizer owns the AudioRecord object,
 * so callers must invoke {@link release} to properly finalize it.
 *
 * @param config The configuration object
 * @throws IOException thrown if audio recorder can not be created for some reason.
 */
protected SpeechRecognizer(Config config) throws IOException {
    decoder = new Decoder(config);

    // Sample rate comes from the decoder's own configuration.
    sampleRate = (int) decoder.getConfig().getFloat("-samprate");
    bufferSize = Math.round(sampleRate * BUFFER_SIZE_SECONDS);

    // Capture buffer is twice the nominal size for read headroom.
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION,
            sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize * 2);

    // An uninitialized recorder is unusable; release it and surface the failure.
    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        throw new IOException(
                "Failed to initialize recorder. Microphone might be already in use.");
    }
}
 
源代码4 项目: CSipSimple   文件: Compatibility.java
/**
 * Chooses the default microphone audio source for calls, returned as a
 * String because it feeds a text-valued preference.
 *
 * @return the numeric AudioSource id as a decimal string
 */
public static String getDefaultMicroSource() {
    // Galaxy S II (GT-I9100) on pre-API-11 firmware is special-cased to the
    // plain MIC source (original author's note: "Except for galaxy S II :(").
    // NOTE(review): toUpperCase() without an explicit Locale is locale-sensitive
    // (e.g. Turkish dotless i) — confirm whether Locale.ROOT should be used.
    boolean isGalaxyS2 = Build.DEVICE.toUpperCase().startsWith("GT-I9100");
    if (!isCompatible(11) && isGalaxyS2) {
        return Integer.toString(AudioSource.MIC);
    }

    if (isCompatible(10)) {
        // 0x7 == MediaRecorder.AudioSource.VOICE_COMMUNICATION; the symbolic
        // constant only exists from API level 11, hence the raw value here.
        return Integer.toString(0x7);
    }

    // Historical per-device experiments (VOICE_CALL on API 4+, Sony X10i,
    // Motorola Atrix CAMCORDER workaround) were judged too risky in terms of
    // regressions or no longer relevant, and were dropped.
    return Integer.toString(AudioSource.DEFAULT);
}
 
源代码5 项目: AndroidRecording   文件: AudioRecordingThread.java
@Override
public void run() {
	// Recording loop: open the output, capture PCM until stopped, then
	// finalize the raw file and convert it to WAV.
	FileOutputStream out = prepareWriting();
	if (out == null) {
		return;
	}

	AudioRecord record = new AudioRecord(AudioSource.VOICE_RECOGNITION, /*AudioSource.MIC*/
			SAMPLING_RATE,
			AudioFormat.CHANNEL_IN_MONO,
			AudioFormat.ENCODING_PCM_16BIT,
			bufferSize);
	record.startRecording();

	while (isRecording) {
		int read = record.read(audioBuffer, 0, bufferSize);

		// Skip buffers that failed to fill and keep polling while recording.
		if (read == AudioRecord.ERROR_INVALID_OPERATION
				|| read == AudioRecord.ERROR_BAD_VALUE
				|| read <= 0) {
			continue;
		}

		proceed();
		write(out);
	}

	record.stop();
	record.release();

	finishWriting(out);
	convertRawToWav();
}
 
源代码6 项目: CameraV   文件: AACHelper.java
/**
 * Tries to set up the shared AudioRecord at the given sample rate with the
 * current channelConfig/audioFormat fields.
 *
 * <p>Fixes: the catch block now logs the exception itself instead of
 * silently dropping it, and an AudioRecord instance that fails to reach
 * {@code STATE_INITIALIZED} is released instead of leaking its native
 * resources.
 *
 * @param rate candidate sample rate in Hz
 * @return the rate on success, -1 if this configuration is unusable
 */
private int initAudioRecord(int rate)
{
    try
    {
        Log.v("===========Attempting rate ", rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
        bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);

        if (bufferSize != AudioRecord.ERROR_BAD_VALUE)
        {
            // check if we can instantiate and have a success
            recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);

            if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
            {
                Log.v("===========final rate ", rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);

                return rate;
            }

            // Not initialized: free the native capture handle before giving up.
            recorder.release();
        }
    }
    catch (Exception e)
    {
        // Preserve the stack trace rather than swallowing the exception.
        Log.v("error", "" + rate, e);
    }

    return -1;
}
 
源代码7 项目: webrtc_android   文件: WebRtcAudioRecord.java
// Default capture source for WebRTC: VOICE_COMMUNICATION, the source intended
// for voice calls. NOTE(review): whether the platform applies echo
// cancellation / noise suppression for it is device-dependent — confirm.
private static int getDefaultAudioSource() {
  return AudioSource.VOICE_COMMUNICATION;
}
 
/**
 * Configures the recorder from JS-supplied options and creates the
 * AudioRecord instance.
 *
 * <p>Recognized keys: "sampleRate" (default 44100), "channels" (2 = stereo,
 * otherwise mono), "bitsPerSample" (8 = PCM 8-bit, otherwise 16-bit),
 * "audioSource" (raw AudioSource id, default VOICE_RECOGNITION), and
 * "wavFile" (output file name, default "audio.wav").
 */
@ReactMethod
public void init(ReadableMap options) {
    sampleRateInHz = options.hasKey("sampleRate")
            ? options.getInt("sampleRate")
            : 44100;

    channelConfig = (options.hasKey("channels") && options.getInt("channels") == 2)
            ? AudioFormat.CHANNEL_IN_STEREO
            : AudioFormat.CHANNEL_IN_MONO;

    audioFormat = (options.hasKey("bitsPerSample") && options.getInt("bitsPerSample") == 8)
            ? AudioFormat.ENCODING_PCM_8BIT
            : AudioFormat.ENCODING_PCM_16BIT;

    audioSource = options.hasKey("audioSource")
            ? options.getInt("audioSource")
            : AudioSource.VOICE_RECOGNITION;

    // Output paths live in the app's private files directory.
    String documentDirectoryPath = getReactApplicationContext().getFilesDir().getAbsolutePath();
    tmpFile = documentDirectoryPath + "/" + "temp.pcm";
    if (options.hasKey("wavFile")) {
        String fileName = options.getString("wavFile");
        outFile = documentDirectoryPath + "/" + fileName;
    } else {
        outFile = documentDirectoryPath + "/" + "audio.wav";
    }

    isRecording = false;
    eventEmitter = reactContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter.class);

    // Triple the minimum buffer to reduce the chance of dropped audio.
    bufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    recorder = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, bufferSize * 3);
}
 
/**
 * Returns an AssistantManager if all required parameters have been supplied.
 *
 * @return An inactive AssistantManager. Call {@link EmbeddedAssistant#connect()} to start
 * it.
 */
public EmbeddedAssistant build() {
    // Fail fast on missing mandatory configuration.
    if (mEmbeddedAssistant.mRequestCallback == null) {
        throw new NullPointerException("There must be a defined RequestCallback");
    }
    if (mEmbeddedAssistant.mConversationCallback == null) {
        throw new NullPointerException("There must be a defined ConversationCallback");
    }
    if (mEmbeddedAssistant.mUserCredentials == null) {
        throw new NullPointerException("There must be provided credentials");
    }
    if (mSampleRate == 0) {
        throw new NullPointerException("There must be a defined sample rate");
    }
    // 16-bit PCM on both the capture and playback side (matches LINEAR16 below).
    final int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;

    // Construct audio configurations.
    mEmbeddedAssistant.mAudioInConfig = AudioInConfig.newBuilder()
            .setEncoding(AudioInConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutConfig = AudioOutConfig.newBuilder()
            .setEncoding(AudioOutConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .setVolumePercentage(mEmbeddedAssistant.mVolume)
            .build();

    // Initialize Audio framework parameters.
    // Input: mono capture at the configured sample rate.
    mEmbeddedAssistant.mAudioInputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioInputBufferSize = AudioRecord.getMinBufferSize(
            mEmbeddedAssistant.mAudioInputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioInputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioInputFormat.getEncoding());
    // Output: mono playback, mirroring the input format's rate/encoding.
    mEmbeddedAssistant.mAudioOutputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutputBufferSize = AudioTrack.getMinBufferSize(
            mEmbeddedAssistant.mAudioOutputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioOutputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioOutputFormat.getEncoding());

    // create new AudioRecord to workaround audio routing issues.
    mEmbeddedAssistant.mAudioRecord = new AudioRecord.Builder()
            .setAudioSource(AudioSource.VOICE_RECOGNITION)
            .setAudioFormat(mEmbeddedAssistant.mAudioInputFormat)
            .setBufferSizeInBytes(mEmbeddedAssistant.mAudioInputBufferSize)
            .build();
    // Route capture through a specific device when one was supplied;
    // failure to set it is logged but not fatal.
    if (mEmbeddedAssistant.mAudioInputDevice != null) {
        boolean result = mEmbeddedAssistant.mAudioRecord.setPreferredDevice(
                mEmbeddedAssistant.mAudioInputDevice);
        if (!result) {
            Log.e(TAG, "failed to set preferred input device");
        }
    }

    // Construct DeviceConfig
    mEmbeddedAssistant.mDeviceConfig = DeviceConfig.newBuilder()
        .setDeviceId(mDeviceInstanceId)
        .setDeviceModelId(mDeviceModelId)
        .build();

    // Construct default ScreenOutConfig
    mEmbeddedAssistant.mScreenOutConfig = ScreenOutConfig.newBuilder()
            .setScreenMode(ScreenOutConfig.ScreenMode.SCREEN_MODE_UNSPECIFIED)
            .build();

    return mEmbeddedAssistant;
}
 
源代码10 项目: android-fskmodem   文件: MainActivity.java
@Override
protected void onCreate(Bundle savedInstanceState) {
	super.onCreate(savedInstanceState);
	setContentView(R.layout.activity_main);

	// --- FSK modem configuration ---------------------------------------
	try {
		mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_16BIT,
				FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4, FSKConfig.THRESHOLD_20P);
	} catch (IOException e) {
		e.printStackTrace();
	}

	// --- FSK decoder: append every decoded chunk to the result view ----
	mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {

		@Override
		public void decoded(byte[] newData) {
			final String text = new String(newData);

			runOnUiThread(new Runnable() {
				public void run() {
					TextView view = ((TextView) findViewById(R.id.result));
					view.setText(view.getText() + text);
				}
			});
		}
	});

	// Recorder settings must match the decoder settings; most devices can
	// only record 44100Hz samples in 16-bit PCM format.
	mBufferSize = AudioRecord.getMinBufferSize(FSKConfig.SAMPLE_RATE_44100,
			AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);

	// A larger buffer means bigger reads, which minimizes the chance of
	// missing data because of thread priority.
	mBufferSize *= 10;

	mRecorder = new AudioRecord(AudioSource.MIC, FSKConfig.SAMPLE_RATE_44100,
			AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);

	if (mRecorder.getState() != AudioRecord.STATE_INITIALIZED) {
		Log.i("FSKDecoder", "Please check the recorder settings, something is wrong!");
		return;
	}

	mRecorder.startRecording();

	// Feed the decoder from a dedicated, high-priority capture thread.
	Thread recordThread = new Thread(mRecordFeed);
	recordThread.setPriority(Thread.MAX_PRIORITY);
	recordThread.start();
}
 
 类所在包
 类方法
 同包方法