Since the previous post was already heavy on code, I split this one out. Over the last couple of days I have mainly been looking at voice recording, compression and transcoding on Android. A while ago Tencent's official Weibo announced that an instant-messaging app they built is waiting for approval; since it directly affects the phone carriers, the odds of it getting through are probably slim, but I am very interested in the technology involved, so I decided to give it a try myself.

There is a lot of audio processing involved. Android has two classes for recording voice: AudioRecord and MediaRecorder. MediaRecorder records audio and writes it straight to a file, while AudioRecord captures the raw audio stream, which comes out in PCM format (look PCM up if you are not familiar with it). For transmission the PCM can be converted to AMR, but the lack of a ready-made Java library makes that awkward, and the iPhone cannot play AMR anyway; if you need to cover both platforms you can use AAC, which has a good compression ratio and good quality, although I have not tested it myself and that is only what I have read online. For the encoding everyone recommends Speex, and from what I have seen you have to load its native library with System.loadLibrary and call into it from Java. Two quick sketches of those points are below, followed by the record-and-play code I put together.
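For contrast with the AudioRecord approach used further down, this is roughly what the MediaRecorder route looks like when you just want the audio written straight to a file. It is only a minimal sketch under my own assumptions: the output path handling and the AMR_NB choice are mine, not something taken from the app below.

import java.io.File;
import java.io.IOException;
import android.media.MediaRecorder;

public class FileRecorderSketch {
    private MediaRecorder recorder;

    public void start(File output) throws IOException {
        recorder = new MediaRecorder();
        recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
        //3GP container with the narrow-band AMR encoder
        recorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        recorder.setOutputFile(output.getAbsolutePath());
        recorder.prepare();
        recorder.start();
    }

    public void stop() {
        recorder.stop();
        recorder.release();
        recorder = null;
    }
}

If the iPhone side matters, swapping AMR_NB for MediaRecorder.AudioEncoder.AAC (the AAC encoder constant appeared in 2.3.3) and the container for MediaRecorder.OutputFormat.MPEG_4 should give a file both platforms can play, though as I said I have not verified that myself.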
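On the Speex side, the usual pattern is a thin Java wrapper around the native library, loaded with System.loadLibrary. I have not actually built this yet, so the library name and every native signature below are hypothetical placeholders; they would have to match whatever JNI wrapper you end up compiling.

public class SpeexCodec {
    static {
        //expects a libspeex.so packaged with the app
        System.loadLibrary("speex");
    }

    //hypothetical native entry points, declared here and implemented in the .so
    public native int open(int quality);
    public native int encode(short[] pcm, int offset, byte[] encoded, int size);
    public native int decode(byte[] encoded, short[] pcm, int size);
    public native void close();
}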
And here is the full record-and-play activity:
package voice.hcent.com;
import java.io.IOException;
import android.app.Activity;
import android.os.Bundle;
import android.os.Process;
import android.util.Log;
import android.view.MotionEvent;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioTrack;
import android.media.MediaRecorder;
public class VoiceHcentActivity extends Activity {
static {
//AudioRecord and AudioTrack do not actually require this; the platform
//loads its own media JNI code, so this line is optional
System.loadLibrary("media_jni");
}
public int frequency = 8000; //sample rate in Hz
private int rBufferSize, pBufferSize; //recorder / player buffer sizes
private Button startSpeech;
private AudioRecord recorder;
private VoiceSpeech vspeech;
private AudioTrack player;
private boolean stopSpeech = false;
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
init();
test();
}
public void init(){
try{
startSpeech = (Button)findViewById(R.id.StartSpeech);
//minimum buffer size for the player
pBufferSize = AudioTrack.getMinBufferSize(frequency, AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT);
//create the player in streaming mode
player = new AudioTrack(AudioManager.STREAM_MUSIC, frequency,
AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
pBufferSize, AudioTrack.MODE_STREAM);
//minimum buffer size for the recorder
rBufferSize = AudioRecord.getMinBufferSize(frequency,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
//create the recorder, reading from the microphone
recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
frequency, AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT, rBufferSize);
}catch (Exception e) {
String msg = "ERROR init: "+e.getStackTrace();
VoiceHcentActivity.this.toastMsg(msg);
}
}
/**
* Start recording
*/
public void startRecord(){
stopSpeech = false;
vspeech = new VoiceSpeech();
vspeech.start();
}
/**
* Stop recording
*/
public void stopRecord() {
stopSpeech = true;
}
/**
* Start playing what is being recorded
*/
public void startPlay(){
//set playback volume for both channels
player.setStereoVolume(0.7f, 0.7f);
player.play();
}
/**
* Stop playback
*/
public void stopPlay(){
player.stop();
}
public void test(){
startSpeech.setOnTouchListener(new View.OnTouchListener() {
public boolean onTouch(View arg0, MotionEvent arg1) {
switch (arg1.getAction()) {
case MotionEvent.ACTION_DOWN: //finger down: start talking
startPlay();
startRecord();
toastMsg("started recording!");
break;
case MotionEvent.ACTION_UP: //finger up: stop talking
Log.i("hcent", "111");
stopPlay();
Log.i("hcent", "222");
stopRecord();
toastMsg("stopped recording!");
break;
break;
default:
break;
}
return false;
}
});
}
public class VoiceSpeech extends Thread{
@Override
public void run() {
super.run();
try {
byte[] tempBuffer, readBuffer = new byte[rBufferSize];
int bufResult = 0;
recorder.startRecording();
while(!stopSpeech){
bufResult = recorder.read(readBuffer, 0, rBufferSize);
//forward only complete 16-bit samples
if(bufResult>0 && bufResult%2==0){
tempBuffer = new byte[bufResult];
//copy only the bytes actually read, not the whole buffer
System.arraycopy(readBuffer, 0, tempBuffer, 0, bufResult);
player.write(tempBuffer, 0, tempBuffer.length);
}
Log.d("hcent", "get read:"+bufResult+"___"+readBuffer.length);
}
recorder.stop();
//Toast must run on the UI thread, so post it back instead of looping here
runOnUiThread(new Runnable() {
public void run() {
toastMsg("AudioSpeech has ended!");
}
});
} catch (final Exception e) {
runOnUiThread(new Runnable() {
public void run() {
toastMsg("ERROR AudioRecord: " + e);
}
});
}
}
}
@Override
protected void onDestroy(){
player.release();
recorder.release();
super.onDestroy();
Process.killProcess(Process.myPid());
}
public void toastMsg(String msg){
Toast.makeText(this, msg, Toast.LENGTH_SHORT).show();
Log.e("hcent", msg);
}
}
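For reference, here is a variant of the capture thread that dumps the raw PCM to a file instead of feeding it to AudioTrack, which is roughly the shape of what you would do before handing the data to an AMR or Speex encoder, or to a socket. Again only a sketch under my own assumptions: the output path is made up, and the file is headerless 16-bit little-endian PCM, so anything that plays it needs to be told the format (8000 Hz, mono).

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;

public class PcmDumpThread extends Thread {
    private volatile boolean stopped = false;
    private final String path; //e.g. getFilesDir() + "/speech.pcm" (my assumption)

    public PcmDumpThread(String path) { this.path = path; }

    public void stopDump() { stopped = true; }

    @Override
    public void run() {
        int bufferSize = AudioRecord.getMinBufferSize(8000,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                8000, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT, bufferSize);
        byte[] buffer = new byte[bufferSize];
        BufferedOutputStream out = null;
        try {
            out = new BufferedOutputStream(new FileOutputStream(path));
            recorder.startRecording();
            while (!stopped) {
                int read = recorder.read(buffer, 0, buffer.length);
                if (read > 0) {
                    //raw PCM, no WAV header
                    out.write(buffer, 0, read);
                }
            }
            out.flush();
        } catch (IOException e) {
            Log.e("hcent", "pcm dump failed", e);
        } finally {
            recorder.stop();
            recorder.release();
            if (out != null) {
                try { out.close(); } catch (IOException ignored) {}
            }
        }
    }
}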