下面列出了 android.media.AudioFormat#CHANNEL_IN_MONO 的实例代码;也可以点击链接到 GitHub 查看源代码,或在右侧发表评论。
/**
 * Configures the recorder for the requested sample rate and channel count.
 * <p>
 * Maps a channel count of 1/2 to the matching {@link AudioFormat} input mask and
 * queries the minimum AudioRecord buffer size. If the device rejects the requested
 * rate (negative buffer size), falls back to 8 kHz — the rate every Android device
 * is guaranteed to support — and re-queries.
 *
 * @param sampleRate requested sample rate in hertz
 * @param channels   number of input channels; only 1 (mono) and 2 (stereo) are handled
 */
public synchronized void configure(int sampleRate, int channels) {
    this.sampleRate = sampleRate;
    recordChannels = channels;
    switch (recordChannels) {
        case 1:
            androidChannels = AudioFormat.CHANNEL_IN_MONO;
            break;
        case 2:
            androidChannels = AudioFormat.CHANNEL_IN_STEREO;
            break;
        // NOTE(review): any other channel count silently keeps the previous
        // androidChannels value — confirm callers only ever pass 1 or 2.
    }
    minBufferSize = AudioRecord.getMinBufferSize(sampleRate, androidChannels, audioEncoding);
    if (minBufferSize < 0) {
        this.sampleRate = 8000;
        // Bug fix: retry with the fallback rate (this.sampleRate). The original code
        // passed the unchanged 'sampleRate' parameter, so the retry queried the very
        // rate that had just failed and minBufferSize stayed negative.
        minBufferSize = AudioRecord.getMinBufferSize(this.sampleRate, androidChannels, audioEncoding);
    }
}
/**
 * Creates the AudioRecord for the microphone at the fixed recording sample rate
 * (16-bit PCM, mono). The buffer size is only remembered in {@code mBufSize}
 * once the recorder reports {@link AudioRecord#STATE_INITIALIZED}.
 */
private void initAudioRecord() {
    final int minBytes = AudioRecord.getMinBufferSize(
            RECORDING_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    mAudioRecord = new AudioRecord(
            MediaRecorder.AudioSource.MIC,
            RECORDING_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBytes);
    // Guard against a recorder that failed to initialize (e.g. missing permission).
    if (mAudioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
        mBufSize = minBytes;
    }
}
/**
 * Convenience constructor: records from the voice-recognition audio source
 * as 16-bit PCM mono, delegating all work to the full constructor.
 *
 * @param sampleRateInHz    sample rate of the recording, in hertz
 * @param bufferSizeInBytes AudioRecord buffer size, in bytes
 * @param noise             presumably toggles noise suppression — confirm against the delegate constructor
 * @param gain              presumably toggles automatic gain control — confirm against the delegate constructor
 * @param echo              presumably toggles acoustic echo cancellation — confirm against the delegate constructor
 * @throws IllegalArgumentException if the delegate constructor rejects the arguments
 */
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes, boolean noise, boolean gain, boolean echo)
        throws IllegalArgumentException {
    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            noise,
            gain,
            echo
    );
}
MicRecorder(AudioEncodeConfig config) {
mEncoder = new AudioEncoder(config);
mSampleRate = config.sampleRate;
mChannelsSampleRate = mSampleRate * config.channelCount;
if (VERBOSE) Log.i(TAG, "in bitrate " + mChannelsSampleRate * 16 /* PCM_16BIT*/);
mChannelConfig = config.channelCount == 2 ? AudioFormat.CHANNEL_IN_STEREO : AudioFormat.CHANNEL_IN_MONO;
mRecordThread = new HandlerThread(TAG);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    // Runtime permissions are only required/available on Android 6.0 (API 23) and above.
    if (Build.VERSION.SDK_INT >= 23) {
        requestPermissions(perms, REQUEST_CODE);
    }
    // A single toggle button switches between recording and stopped states.
    ImageButton mImageButton = (ImageButton) findViewById(R.id.action_image);
    mImageButton.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            // Ignore taps until the recorder exists and reports it is initialized.
            if (mRecorder == null || !mRecorder.isInitialized()) {
                return;
            }
            boolean recording = mRecorder.isRecording();
            if (recording) {
                // Currently recording -> stop and restore the "record" icon.
                ((ImageButton) v).setImageResource(R.drawable.record);
                mRecorder.stop();
            } else {
                // Currently idle -> start recording and show the "pause" icon.
                ((ImageButton) v).setImageResource(R.drawable.pause);
                mRecorder.startRecording();
            }
        }
    });
    boolean result = createOutputFile();
    if (!result) {
        // User-facing message ("failed to create file") — intentionally left untranslated.
        Toast.makeText(this, "创建文件失败~", Toast.LENGTH_SHORT).show();
    }
    // 44.1 kHz, mono, 16-bit PCM from the device microphone.
    mRecorder = new Recorder(44100,
            AudioFormat.CHANNEL_IN_MONO/* mono channel config */,
            AudioFormat.ENCODING_PCM_16BIT/* sample format */,
            MediaRecorder.AudioSource.MIC/*AudioSource*/,
            NUM_SAMPLES/*period*/,
            this/*onDataChangeListener*/);
}
/**
 * The trigger to open a new AudioRecord and start recording with the intention of sending the audio to the AVS server using the stopRecord(). This will have permissions
 * issues in Marshmallow that need to be handled at the Activity level (checking for permission to record audio, and requesting it if we don't already have permissions).
 * @param url our POST url
 * @param accessToken our user's access token
 * @param buffer a byte[] that allows us to prepend whatever audio is recorded by the user with either generated or pre-recorded audio, this needs
 * to be in the same format as the audio being recorded
 * @param callback our callback to notify us when we change states
 * @throws IOException
 *
 * @deprecated Manage this state on the application side, instead, and send the audio using {@link SpeechSendAudio}
 */
@Deprecated
public void startRecording(final String url, final String accessToken, @Nullable byte[] buffer,
        @Nullable final AsyncCallback<Void, Exception> callback) throws IOException {
    // Create the recorder under the lock so it can't race with a concurrent stop/teardown.
    synchronized(mLock) {
        mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, AUDIO_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, BUFFER_SIZE);
    }
    if(callback != null){
        callback.start();
    }
    mCallback = callback;
    mIsRecording = true;
    // The HTTP connection is prepared off the main thread (network is not allowed on it).
    new AsyncTask<Void, Void, Void>() {
        @Override
        protected Void doInBackground(Void... params) {
            synchronized(mLock) {
                prepareConnection(url, accessToken);
            }
        }
            return null;
        }
    }.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR);
    // Optional preamble audio is written before the live microphone data.
    // NOTE(review): this write happens on the caller's thread and may run before
    // prepareConnection() completes on the executor — confirm mOutputStream is
    // valid/buffered independently of the connection setup.
    if(buffer != null){
        mOutputStream.write(buffer);
    }
    //record our audio
    recordAudio(mAudioRecord, mOutputStream);
}
/**
 * Returns the minimum AudioRecord buffer size expressed in frames
 * (one frame = one 16-bit sample per channel) for the given rate and channel count.
 */
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
    final int channelConfig;
    if (numChannels == 1) {
        channelConfig = AudioFormat.CHANNEL_IN_MONO;
    } else {
        channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    }
    final int minBufferBytes = AudioRecord.getMinBufferSize(
            sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
    return minBufferBytes / bytesPerFrame;
}
/**
 * Converts the platform's minimum AudioRecord buffer size (bytes) into a
 * frame count, where a frame holds one 16-bit sample for every channel.
 */
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
    final boolean mono = (numChannels == 1);
    final int mask = mono ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO;
    final int minBytes =
            AudioRecord.getMinBufferSize(sampleRateInHz, mask, AudioFormat.ENCODING_PCM_16BIT);
    return minBytes / (numChannels * (BITS_PER_SAMPLE / 8));
}
/**
 * Maps a channel count to the AudioFormat input channel mask.
 * A count of 1 yields the mono mask; 2 — and, as in the original code,
 * any unexpected count — yields the stereo mask.
 */
private int getChannelMask(int channels) {
    if (channels == 1) {
        return AudioFormat.CHANNEL_IN_MONO;
    }
    return AudioFormat.CHANNEL_IN_STEREO;
}
/**
 * Creates a recorder preconfigured with the most common application defaults:
 * voice-recognition source, 8 kHz, mono, 16-bit PCM, with enhancement enabled.
 */
public SaiyRecorder() {
    audioSource = MediaRecorder.AudioSource.VOICE_RECOGNITION;
    sampleRateInHz = 8000;
    channelConfig = AudioFormat.CHANNEL_IN_MONO;
    audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    // calculateBufferSize() presumably reads the fields set above — keep it after them.
    bufferSizeInBytes = calculateBufferSize();
    enhance = true;
}
/** Builds the default pullable microphone source: MIC input, 16-bit PCM, mono, 44.1 kHz. */
private PullableSource mic() {
    final AudioRecordConfig.Default recordConfig = new AudioRecordConfig.Default(
            MediaRecorder.AudioSource.MIC, AudioFormat.ENCODING_PCM_16BIT,
            AudioFormat.CHANNEL_IN_MONO, 44100);
    return new PullableSource.Default(recordConfig);
}
/**
 * Returns a pullable source backed by the device microphone,
 * configured for 44.1 kHz mono 16-bit PCM capture.
 */
private PullableSource mic() {
    return new PullableSource.Default(
            new AudioRecordConfig.Default(
                    MediaRecorder.AudioSource.MIC,
                    AudioFormat.ENCODING_PCM_16BIT,
                    AudioFormat.CHANNEL_IN_MONO,
                    44100));
}
/**
 * Convenience constructor: records from the voice-recognition audio source as
 * 16-bit PCM mono with all three audio effects (presumably noise suppression,
 * gain control, echo cancellation — confirm against the delegate constructor) disabled.
 *
 * @param sampleRateInHz    sample rate of the recording, in hertz
 * @param bufferSizeInBytes AudioRecord buffer size, in bytes
 * @throws IllegalArgumentException if the delegate constructor rejects the arguments
 */
public SpeechRecord(int sampleRateInHz, int bufferSizeInBytes)
        throws IllegalArgumentException {
    this(
            MediaRecorder.AudioSource.VOICE_RECOGNITION,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSizeInBytes,
            false,
            false,
            false
    );
}
/**
 * Writes the 44-byte RIFF/WAVE header for the given stream by translating the
 * Android channel mask and PCM encoding into a channel count and bit depth,
 * then delegating to the overload that emits the actual bytes. Two size fields
 * are left empty/null since the final stream size is not yet known.
 *
 * @param out         the stream to write the header to
 * @param channelMask an AudioFormat.CHANNEL_* mask (mono or stereo only)
 * @param sampleRate  the sample rate in hertz
 * @param encoding    an AudioFormat.ENCODING_PCM_* value (8-bit, 16-bit, or float)
 * @throws IOException              if writing to the stream fails
 * @throws IllegalArgumentException on an unsupported mask or encoding
 */
private void writeWavHeader(OutputStream out, int channelMask, int sampleRate, int encoding)
        throws IOException {
    final short channels;
    if (channelMask == AudioFormat.CHANNEL_IN_MONO) {
        channels = 1;
    } else if (channelMask == AudioFormat.CHANNEL_IN_STEREO) {
        channels = 2;
    } else {
        throw new IllegalArgumentException("Unacceptable channel mask");
    }
    final short bitDepth;
    if (encoding == AudioFormat.ENCODING_PCM_8BIT) {
        bitDepth = 8;
    } else if (encoding == AudioFormat.ENCODING_PCM_16BIT) {
        bitDepth = 16;
    } else if (encoding == AudioFormat.ENCODING_PCM_FLOAT) {
        bitDepth = 32;
    } else {
        throw new IllegalArgumentException("Unacceptable encoding");
    }
    writeWavHeader(out, channels, sampleRate, bitDepth);
}
/**
 * Probes whether the app can actually record audio (i.e. has the record
 * permission and the microphone is available) by opening a throwaway
 * AudioRecord, starting it, and attempting a read.
 *
 * @return STATE_NO_PERMISSION if starting fails or the read yields no data,
 *         STATE_RECORDING if the recorder never enters the recording state
 *         (mic presumably occupied; pre-6.0 devices always return this state,
 *         so callers must check the build version), STATE_SUCCESS otherwise
 */
public static int getRecordState() {
    int minBuffer = AudioRecord.getMinBufferSize(44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat
            .ENCODING_PCM_16BIT);
    AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, 44100, AudioFormat
            .CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, (minBuffer * 100));
    short[] point = new short[minBuffer];
    int readSize = 0;
    try {
        audioRecord.startRecording();// Check whether the recorder can be started at all.
    } catch (Exception e) {
        // Starting threw — treat as missing record permission.
        if (audioRecord != null) {
            audioRecord.release();
            audioRecord = null;
        }
        return STATE_NO_PERMISSION;
    }
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // Devices below Android 6.0 always end up here, so callers must check the build version.
        // Check whether the recorder actually entered the recording state.
        if (audioRecord != null) {
            audioRecord.stop();
            audioRecord.release();
            audioRecord = null;
            LogUtil.d("录音机被占用");  // log: "recorder is occupied"
        }
        return STATE_RECORDING;
    } else {
        // Check whether any audio data can actually be read back.
        readSize = audioRecord.read(point, 0, point.length);
        if (readSize <= 0) {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            LogUtil.d("录音的结果为空");  // log: "recording result is empty"
            return STATE_NO_PERMISSION;
        } else {
            if (audioRecord != null) {
                audioRecord.stop();
                audioRecord.release();
                audioRecord = null;
            }
            return STATE_SUCCESS;
        }
    }
}
/** Converts a channel count to the matching AudioFormat input configuration mask. */
private int channelCountToConfiguration(int channels) {
    if (channels == 1) {
        return AudioFormat.CHANNEL_IN_MONO;
    }
    return AudioFormat.CHANNEL_IN_STEREO;
}
/** Maps 1 channel to the mono input mask and any other count to stereo. */
private int channelCountToConfiguration(int channels) {
    return (channels != 1) ? AudioFormat.CHANNEL_IN_STEREO : AudioFormat.CHANNEL_IN_MONO;
}
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    /// INIT FSK CONFIG
    try {
        mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_16BIT, FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4, FSKConfig.THRESHOLD_20P);
    } catch (IOException e1) {
        // NOTE(review): mConfig stays null if construction fails, yet it is passed to
        // FSKDecoder below — confirm FSKDecoder tolerates a null config.
        e1.printStackTrace();
    }
    /// INIT FSK DECODER
    // Decoded bytes are appended to the result TextView on the UI thread.
    mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {
        @Override
        public void decoded(byte[] newData) {
            final String text = new String(newData);
            runOnUiThread(new Runnable() {
                public void run() {
                    TextView view = ((TextView) findViewById(R.id.result));
                    view.setText(view.getText()+text);
                }
            });
        }
    });
    ///
    //make sure that the settings of the recorder match the settings of the decoder
    //most devices cant record anything but 44100 samples in 16bit PCM format...
    mBufferSize = AudioRecord.getMinBufferSize(FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    //scale up the buffer... reading larger amounts of data
    //minimizes the chance of missing data because of thread priority
    mBufferSize *= 10;
    //again, make sure the recorder settings match the decoder settings
    mRecorder = new AudioRecord(AudioSource.MIC, FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);
    if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
        mRecorder.startRecording();
        //start a thread to read the audio data
        Thread thread = new Thread(mRecordFeed);
        thread.setPriority(Thread.MAX_PRIORITY);
        thread.start();
    }
    else {
        Log.i("FSKDecoder", "Please check the recorder settings, something is wrong!");
    }
}
/**
 * Records raw mono 16-bit PCM audio at 44.1 kHz from the default audio source
 * into a growable in-memory buffer until the progress listener asks to stop,
 * then computes per-frame gain values for waveform display.
 * Blocks the calling thread for the whole duration of the recording.
 */
private void RecordAudio() {
    if (mProgressListener == null) {
        // A progress listener is mandatory here, as it will let us know when to stop recording.
        return;
    }
    mInputFile = null;
    mFileType = "raw";
    mFileSize = 0;
    mSampleRate = 44100;
    mChannels = 1; // record mono audio.
    short[] buffer = new short[1024]; // buffer contains 1 mono frame of 1024 16 bits samples
    int minBufferSize = AudioRecord.getMinBufferSize(
            mSampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // make sure minBufferSize can contain at least 1 second of audio (16 bits sample).
    if (minBufferSize < mSampleRate * 2) {
        minBufferSize = mSampleRate * 2;
    }
    AudioRecord audioRecord = new AudioRecord(
            MediaRecorder.AudioSource.DEFAULT,
            mSampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize
    );
    // Allocate memory for 20 seconds first. Reallocate later if more is needed.
    mDecodedBytes = ByteBuffer.allocate(20 * mSampleRate * 2);
    mDecodedBytes.order(ByteOrder.LITTLE_ENDIAN);
    mDecodedSamples = mDecodedBytes.asShortBuffer();
    audioRecord.startRecording();
    while (true) {
        // check if mDecodedSamples can contain 1024 additional samples.
        if (mDecodedSamples.remaining() < 1024) {
            // Try to allocate memory for 10 additional seconds.
            int newCapacity = mDecodedBytes.capacity() + 10 * mSampleRate * 2;
            ByteBuffer newDecodedBytes = null;
            try {
                newDecodedBytes = ByteBuffer.allocate(newCapacity);
            } catch (OutOfMemoryError oome) {
                // Out of memory: stop recording and keep what we have so far.
                break;
            }
            // Copy the existing bytes into the larger buffer, then rebuild the
            // short view and restore the write position (in samples).
            int position = mDecodedSamples.position();
            mDecodedBytes.rewind();
            newDecodedBytes.put(mDecodedBytes);
            mDecodedBytes = newDecodedBytes;
            mDecodedBytes.order(ByteOrder.LITTLE_ENDIAN);
            mDecodedBytes.rewind();
            mDecodedSamples = mDecodedBytes.asShortBuffer();
            mDecodedSamples.position(position);
        }
        // TODO(nfaralli): maybe use the read method that takes a direct ByteBuffer argument.
        audioRecord.read(buffer, 0, buffer.length);
        mDecodedSamples.put(buffer);
        // Let the progress listener know how many seconds have been recorded.
        // The returned value tells us if we should keep recording or stop.
        if (!mProgressListener.reportProgress(
                (float)(mDecodedSamples.position()) / mSampleRate)) {
            break;
        }
    }
    audioRecord.stop();
    audioRecord.release();
    mNumSamples = mDecodedSamples.position();
    mDecodedSamples.rewind();
    mDecodedBytes.rewind();
    mAvgBitRate = mSampleRate * 16 / 1000;  // kbps for 16-bit mono PCM.
    // Temporary hack to make it work with the old version.
    mNumFrames = mNumSamples / getSamplesPerFrame();
    if (mNumSamples % getSamplesPerFrame() != 0){
        // Round up so the trailing partial frame is counted too.
        mNumFrames++;
    }
    mFrameGains = new int[mNumFrames];
    mFrameLens = null; // not needed for recorded audio
    mFrameOffsets = null; // not needed for recorded audio
    int i, j;
    int gain, value;
    // For each frame, take the maximum absolute sample value as its gain;
    // samples past the end of the recording are treated as silence (0).
    for (i=0; i<mNumFrames; i++){
        gain = -1;
        for(j=0; j<getSamplesPerFrame(); j++) {
            if (mDecodedSamples.remaining() > 0) {
                value = Math.abs(mDecodedSamples.get());
            } else {
                value = 0;
            }
            if (gain < value) {
                gain = value;
            }
        }
        mFrameGains[i] = (int) Math.sqrt(gain); // here gain = sqrt(max value of 1st channel)...
    }
    mDecodedSamples.rewind();
    // DumpSamples(); // Uncomment this line to dump the samples in a TSV file.
}
/**
 * Writes a complete 44-byte canonical WAV (RIFF/PCM) header to the stream,
 * little-endian throughout, derived from the recorder's configured sample
 * rate, channel config, and sample format fields.
 *
 * @param out           the stream to receive the header (written at its current position)
 * @param totalAudioLen length of the raw PCM payload in bytes ('data' chunk size)
 * @param totalDataLen  file length minus 8 — presumably totalAudioLen + 36; confirm at the call site
 * @throws Exception if writing to the stream fails
 */
private void addWavHeader(FileOutputStream out, long totalAudioLen, long totalDataLen)
        throws Exception {
    long sampleRate = sampleRateInHz;
    int channels = channelConfig == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
    // Only 8-bit is checked explicitly; every other encoding is assumed to be 16-bit.
    int bitsPerSample = audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 8 : 16;
    long byteRate = sampleRate * channels * bitsPerSample / 8;
    int blockAlign = channels * bitsPerSample / 8;
    byte[] header = new byte[44];
    header[0] = 'R'; // RIFF chunk
    header[1] = 'I';
    header[2] = 'F';
    header[3] = 'F';
    header[4] = (byte) (totalDataLen & 0xff); // how big is the rest of this file
    header[5] = (byte) ((totalDataLen >> 8) & 0xff);
    header[6] = (byte) ((totalDataLen >> 16) & 0xff);
    header[7] = (byte) ((totalDataLen >> 24) & 0xff);
    header[8] = 'W'; // WAVE chunk
    header[9] = 'A';
    header[10] = 'V';
    header[11] = 'E';
    header[12] = 'f'; // 'fmt ' chunk
    header[13] = 'm';
    header[14] = 't';
    header[15] = ' ';
    header[16] = 16; // 4 bytes: size of 'fmt ' chunk
    header[17] = 0;
    header[18] = 0;
    header[19] = 0;
    header[20] = 1; // format = 1 for PCM
    header[21] = 0;
    header[22] = (byte) channels; // mono or stereo
    header[23] = 0;
    header[24] = (byte) (sampleRate & 0xff); // samples per second
    header[25] = (byte) ((sampleRate >> 8) & 0xff);
    header[26] = (byte) ((sampleRate >> 16) & 0xff);
    header[27] = (byte) ((sampleRate >> 24) & 0xff);
    header[28] = (byte) (byteRate & 0xff); // bytes per second
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    header[32] = (byte) blockAlign; // bytes in one sample, for all channels
    header[33] = 0;
    header[34] = (byte) bitsPerSample; // bits in a sample
    header[35] = 0;
    header[36] = 'd'; // beginning of the data chunk
    header[37] = 'a';
    header[38] = 't';
    header[39] = 'a';
    header[40] = (byte) (totalAudioLen & 0xff); // how big is this data chunk
    header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
    header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
    out.write(header, 0, 44);
}