http://blog.csdn.net/sea918/article/details/7249216

1. Audio programming models

OSS (Open Sound System): the early unified audio interface on Linux/UNIX, used before Linux kernel 2.6. It exposes device files for programming (/dev/dsp is the recording device file, /dev/audio the playback device file), and the usual operations are open, close, read, write and ioctl.

ALSA (Advanced Linux Sound Architecture): the audio framework in common use today, supported since Linux 2.6. It provides a unified programming interface (snd_pcm_open, snd_pcm_close, snd_pcm_hw_params, ...). Its device files are /dev/snd/pcmC0D0p, /dev/snd/pcmC0D0c, /dev/snd/pcmC0D1p and /dev/snd/timer.

The ALSA driver version can be checked from the shell:
root@ubuntu: cat /proc/asound/version
Advanced Linux Sound Architecture Driver Version 1.0.23

2. Building alsa-lib
a) Download and install the alsa-lib library:
root@ubuntu: tar -xvf alsa-lib-1.0.13.tar.bz2
root@ubuntu: cd alsa-lib-1.0.13
root@ubuntu: ./configure
root@ubuntu: make
root@ubuntu: make install

3. Programming
a) Add the header file:
#include <alsa/asoundlib.h>
b) The recording code (a short build-and-run sketch follows the listing):
/* Use the newer ALSA API */
#define ALSA_PCM_NEW_HW_PARAMS_API
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <alsa/asoundlib.h>
#define LENGTH 3 // recording time, in seconds
#define RATE 9600 // sampling rate, in Hz
#define SIZE 16 // bits per sample (quantization)
#define CHANNELS 1 // number of channels
#define RSIZE 8 // buffer size (unused below)
/******** Layout of a WAVE file header ********/
/*------------------------------------------------
| RIFF WAVE Chunk |
| ID = 'RIFF' |
| RiffType = 'WAVE' |
------------------------------------------------
| Format Chunk |
| ID = 'fmt ' |
------------------------------------------------
| Fact Chunk (optional) |
| ID = 'fact' |
------------------------------------------------
| Data Chunk |
| ID = 'data' |
------------------------------------------------*/
/******** End of the WAVE header layout ********/
/* A WAVE file is made up of four chunks (the third, 'fact', may be omitted).
Each chunk has three parts: an ID, a size (the length of the chunk's data,
not counting the ID and size fields themselves), and the data itself. */
struct fhead
{
/**** RIFF WAVE chunk ****/
unsigned char a[4]; // the four bytes 'R','I','F','F'
int b; // total file length - 8; every chunk's size field excludes that chunk's own ID and size fields
unsigned char c[4]; // the four bytes 'W','A','V','E'
/**** Format chunk ****/
unsigned char d[4]; // the four bytes 'f','m','t',' '
int e; // 16 if no extra information follows, 18 if there is; normally 16 (files converted from other formats may use 18)
short int f; // encoding format, normally 0x0001 (PCM)
short int g; // number of channels: 1 = mono, 2 = stereo
int h; // sampling rate
int i; // bytes per second
short int j; // bytes per sample frame (all channels counted together)
short int k; // bits per sample (quantization)
/**** Data chunk ****/
unsigned char p[4]; // the four bytes 'd','a','t','a'
int q; // length of the audio data, not counting any part of the header
}wavehead; // the WAVE file header; its fields must stay 32-bit/16-bit, hence int/short rather than long
int startRecord(void)
{
long loops;
int rc;
int size;
snd_pcm_t *handle;
snd_pcm_hw_params_t *params;
unsigned int val;
int dir;
snd_pcm_uframes_t frames;
char *buffer;
int fd_f;
int status;
/* fill in the WAVE file header */
wavehead.a[0]='R';
wavehead.a[1]='I';
wavehead.a[2]='F';
wavehead.a[3]='F';
wavehead.b=LENGTH*RATE*CHANNELS*SIZE/8+36; // total file length - 8 (data bytes + 44-byte header - 8)
wavehead.c[0]='W';
wavehead.c[1]='A';
wavehead.c[2]='V';
wavehead.c[3]='E';
wavehead.d[0]='f';
wavehead.d[1]='m';
wavehead.d[2]='t';
wavehead.d[3]=' ';
wavehead.e=16;
wavehead.f=1;
wavehead.g=CHANNELS;
wavehead.h=RATE;
wavehead.i=RATE*CHANNELS*SIZE/8;
wavehead.j=CHANNELS*SIZE/8;
wavehead.k=SIZE;
wavehead.p[0]='d';
wavehead.p[1]='a';
wavehead.p[2]='t';
wavehead.p[3]='a';
wavehead.q=LENGTH*RATE*CHANNELS*SIZE/8;
/* WAVE file header filled in */
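/* Quick check of the values above with these macros: data bytes q = LENGTH*RATE*CHANNELS*SIZE/8
= 3*9600*1*16/8 = 57600, so b = 57600 + 36 = 57636; bytes per second i = 9600*1*16/8 = 19200;
bytes per frame j = 1*16/8 = 2; bits per sample k = 16. */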
/* Open PCM device for recording (capture). */
rc = snd_pcm_open(&handle, "default",
SND_PCM_STREAM_CAPTURE, 0);
if (rc < 0) {
fprintf(stderr,
"unable to open pcm device: %s
",
snd_strerror(rc));
exit(1);
}
/* Allocate a hardware parameters object. */
snd_pcm_hw_params_alloca(&params);
/* Fill it in with default values. */
snd_pcm_hw_params_any(handle, params);
/* Set the desired hardware parameters. */
/* Interleaved mode */
snd_pcm_hw_params_set_access(handle, params,
SND_PCM_ACCESS_RW_INTERLEAVED);
/* Signed 16-bit little-endian format */
snd_pcm_hw_params_set_format(handle, params,
SND_PCM_FORMAT_S16_LE);
/* Number of channels (CHANNELS = 1, mono) */
snd_pcm_hw_params_set_channels(handle, params, CHANNELS);
/* Sampling rate: RATE (9600) frames per second */
val = RATE;
snd_pcm_hw_params_set_rate_near(handle, params,
&val, &dir);
/* Set period size to 32 frames. */
frames = 32;
snd_pcm_hw_params_set_period_size_near(handle,
params, &frames, &dir);
/* Write the parameters to the driver */
rc = snd_pcm_hw_params(handle, params);
if (rc < 0) {
fprintf(stderr,
"unable to set hw parameters: %s
",
snd_strerror(rc));
exit(1);
}
/* Use a buffer large enough to hold one period */
snd_pcm_hw_params_get_period_size(params,
&frames, &dir);
size = frames * 2; /* 2 bytes/sample, 1 channel */
buffer = (char *) malloc(size);
/* Loop for LENGTH seconds, matching the header written below */
snd_pcm_hw_params_get_period_time(params,
&val, &dir);
loops = LENGTH * 1000000 / val;
if(( fd_f = open("./sound.wav", O_CREAT|O_RDWR, 0777))==-1) // create the WAVE output file
{
perror("cannot create the sound file");
}
if((status = write(fd_f, &wavehead, sizeof(wavehead)))==-1) // write the WAVE file header
{
perror("failed to write the WAVE header");
}
while (loops > 0) {
loops--;
rc = snd_pcm_readi(handle, buffer, frames);
if (rc == -EPIPE) {
/* EPIPE means overrun */
fprintf(stderr, "overrun occurred
");
snd_pcm_prepare(handle);
} else if (rc < 0) {
fprintf(stderr,
"error from read: %s
",
snd_strerror(rc));
} else if (rc != (int)frames) {
fprintf(stderr, "short read, read %d frames
", rc);
}
status = write(fd_f, buffer, size);
if(status == -1)
{
perror("failed to write audio data");
}
else if (status != size)
fprintf(stderr,
"short write: wrote %d bytes\n", status);
}
snd_pcm_drain(handle);
snd_pcm_close(handle);
free(buffer);
close(fd_f);
return 0;
}
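To build and try the recorder, here is a minimal sketch of a driver plus the compile line, assuming the listing above is saved as record.c (the file name, and playing the result back with aplay, are assumptions, not part of the original article):

int main(void)
{
    return startRecord();   /* records LENGTH seconds into ./sound.wav */
}

/* Build and run, linking against alsa-lib:
 *   gcc record.c -o record -lasound
 *   ./record
 * The captured file can then be played back with, e.g., aplay sound.wav
 */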
OSS programming
Linux has quite a few files related to the sound card, such as /dev/dsp for sampled digital audio, /dev/mixer for the mixer, and /dev/sequencer for the sequencer. /dev/audio is a sound device file kept for compatibility; it is really a mapping onto the digital device above, and its most notable feature is perhaps its direct support for data in formats such as wav. A simple recorder works along these lines: read audio data from the sound-card device (through a microphone, of course) and store it in the file test.wav; to play that file back, the command cat test.wav > /dev/audio is enough, or any other multimedia player under Linux can be used. A sketch of such a recorder is given below, followed by the playback routine, which configures /dev/dsp and writes a file's data to it.
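The recorder itself does not appear in the original listing; the following is only a minimal sketch of one, assuming the same /dev/dsp device and SOUND_PCM_WRITE_* ioctls used by the playback code below. The 8 kHz / mono / 8-bit parameters and the fixed five-second length are illustrative, and the output is raw PCM with no WAV header:

/* oss_record.c -- illustrative OSS capture sketch (raw samples only, no WAV header) */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>

int main(void)
{
    int rate = 8000, channels = 1, bits = 8, seconds = 5; /* illustrative parameters */
    int dsp_fd = open("/dev/dsp", O_RDONLY);              /* open the DSP device for capture */
    if (dsp_fd < 0) { perror("open /dev/dsp"); return 1; }

    /* same ioctls as the playback routine below, applied to a capture descriptor */
    ioctl(dsp_fd, SOUND_PCM_WRITE_BITS, &bits);
    ioctl(dsp_fd, SOUND_PCM_WRITE_CHANNELS, &channels);
    ioctl(dsp_fd, SOUND_PCM_WRITE_RATE, &rate);

    int out_fd = open("test.wav", O_CREAT | O_WRONLY | O_TRUNC, 0644);
    if (out_fd < 0) { perror("open test.wav"); close(dsp_fd); return 1; }

    char buf[4096];
    long remaining = (long)seconds * rate * channels * bits / 8;
    while (remaining > 0) {
        int n = read(dsp_fd, buf, sizeof(buf)); /* blocking read of captured samples */
        if (n <= 0) break;
        write(out_fd, buf, n);
        remaining -= n;
    }
    close(out_fd);
    close(dsp_fd);
    return 0;
}

The playback routine that follows takes the reverse path: it sets the same parameters on /dev/dsp and then writes data read from a file to the device.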
/* sys/soundcard.h defines all the SOUND_* macros used below */
#include <sys/soundcard.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <string.h>
#define OPEN_DSP_FAILED 0x00000001 /* failed to open /dev/dsp */
#define SAMPLERATE_STATUS 0x00000002 /* sample rate ioctl failed */
#define SET_SAMPLERATE_FAILED 0x00000003 /* failed to set the sample rate */
#define CHANNELS_STATUS 0x00000004 /* channels ioctl failed */
#define SET_CHANNELS_FAILED 0x00000005 /* failed to set the channel count */
#define FMT_STATUS 0x00000006 /* format ioctl failed */
#define SET_FMT_FAILED 0x00000007 /* failed to set the sample format */
#define OPEN_FILE_FAILED 0x00000008 /* failed to open the file */
int P8100_Audio_Play(char *pathname,int nSampleRate,int nChannels,int fmt)
{
int dsp_fd,status,arg;
dsp_fd = open("/dev/dsp" , O_RDWR); /*open dsp*/
if(dsp_fd < 0)
{
return OPEN_DSP_FAILED;
}
arg = nSampleRate;
status = ioctl(dsp_fd,SOUND_PCM_WRITE_RATE,&arg); /*set samplerate*/
if(status < 0)
{
close(dsp_fd);
return SAMPLERATE_STATUS;
}
if(arg != nSampleRate)
{
close(dsp_fd);
return SET_SAMPLERATE_FAILED;
}
arg = nChannels; /*set channels*/
status = ioctl(dsp_fd, SOUND_PCM_WRITE_CHANNELS, &arg);
if(status < 0)
{
close(dsp_fd);
return CHANNELS_STATUS;
}
if( arg != nChannels)
{
close(dsp_fd);
return SET_CHANNELS_FAILED;
}
arg = fmt; /*set bit fmt*/
status = ioctl(dsp_fd, SOUND_PCM_WRITE_BITS, &arg);
if(status < 0)
{
close(dsp_fd);
return FMT_STATUS;
}
if(arg != fmt)
{
close(dsp_fd);
return SET_FMT_FAILED;
}/* all DSP parameters are now configured */
FILE *file_fd = fopen(pathname,"r");
if(file_fd == NULL)
{
close(dsp_fd);
return OPEN_FILE_FAILED;
}
int num = 3*nChannels*nSampleRate*fmt/8; /* bytes in 3 seconds of audio */
int get_num;
char buf[num];
while(feof(file_fd) == 0)
{
get_num = fread(buf,1,num,file_fd);
write(dsp_fd,buf,get_num);
if(get_num != num)
{
close(dsp_fd);
fclose(file_fd);
return 0;
}
}
close(dsp_fd);
fclose(file_fd);
return 0;
}
int main()
{
int value;
value = P8100_Audio_Play("/windows/C/WINDOWS/Media/Windows Startup.wav",44100,2,16);
// note: adjust the path of the file to be played
fprintf(stderr,"value is %d\n",value);
return 0;
}
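The player can be built with a plain gcc play.c -o play (the source file name is an assumption). On modern distributions /dev/dsp is usually absent, in which case OSS-emulation wrappers such as aoss (from alsa-oss) or padsp (from PulseAudio) can be used to run the program, e.g. padsp ./play.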