|
吾爱游客
发表于 2017-3-19 15:55
1、申 请 I D : maozhenyu
2、个人邮箱:maozhenyu@foxmail.com
3、原创技术文章:
这篇首发在SegmentFault上,也是第一篇在网络上发的技术文章。原文地址:https://segmentfault.com/a/1190000007474125 。那么我是不是要证明一下SegmentFault上的那个人是我呢?于是我在那篇文章的原文上增加了“这篇文章被用作申请52pojie.cn的账号了,账号:maozhenyu 邮箱:maozhenyu@foxmail.com”,方便审核。
iOS AudioQueue/AudioSession VoIP业务的实现
iOS SDK升级到7.0之后,Apple对AudioSession做了重大革新,因此很多接口都需要调整。基础概念:首先我们得了解一下AudioSession和AudioQueue分别是什么东西。
Session就好像我们家里音响设备的总管理
Queue负责具体实现播放和录音[AVAudioSession sharedInstance] 来获取AVAudioSession的实例加载AudioSession这里我们需要实现启动AudioSession、处理被中断(比如你在使用VoIP的时候,突然一个电话打进来……)AVAudioSession *session=[AVAudioSession sharedInstance]; //AVAudioSessionPortOverrideSpeaker if (![session setCategory:AVAudioSessionCategoryPlayAndRecord withOptions:AVAudioSessionCategoryOptionMixWithOthers| AVAudioSessionCategoryOptionDefaultToSpeaker error:nil]) { //无法启动语音 return; } [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(routeChange:) name:AVAudioSessionRouteChangeNotification object:nil]; [[NSNotificationCenter defaultCenter] addObserverForName:AVAudioSessionInterruptionNotification object:session queue:nil usingBlock:^(NSNotification *notification) { int status=[[notification.userInfo valueForKey:AVAudioSessionInterruptionTypeKey] intValue]; if ( status== AVAudioSessionInterruptionTypeBegan) { //语音已被中断 AudioQueuePause(_recordQueue); //这时候暂停了录制的Queue } else if(status==AVAudioSessionInterruptionTypeEnded) { [[AVAudioSession sharedInstance] setActive:TRUE error:nil]; //重新激活 AudioQueueStart(_recordQueue,nil); //重启 恢复 } }]; if (![session setActive:YES error:&error]) //这里激活session { return; }AVAudioSessionCategoryPlayAndRecord:同时录音和放音AVAudioSessionCategoryOptionMixWithOthers| 与其他应用程序混音AVAudioSessionCategoryOptionDefaultToSpeaker 强制播放到扬声器(蓝牙无效,耳机插入有效)获取权限、加载和配置AudioQueue然后来做个基础的定义#define ProcessPeo 0.03#define PlayBaSam 48000#define RecordSam 44100实现VoIP,录音必不可少,不过需要请求和判断录音权限、然后加载录音用的AudioQueue(播放和录音互相独立了开来)://获取当前的录音权限switch ([AVAudioSession sharedInstance].recordPermission) { case AVAudioSessionRecordPermissionUndetermined: { UIAlertView *a = [[UIAlertView alloc] initWithTitle:@"授权提示" message:@"你需要授权" delegate:self cancelButtonTitle:@"好的" otherButtonTitles:nil, nil]; [a show]; break; } case AVAudioSessionRecordPermissionDenied: [[[CustomAlertView alloc] initWithTitle:@"您拒绝了使用麦克风的请求。如果需要恢复,请去系统设置。" message:@"TX无法使用" delegate:nil cancelButtonTitle:@"确定" otherButtonTitles: nil] show]; break; case 
AVAudioSessionRecordPermissionGranted: { break; } default: break; }//开始请求[session requestRecordPermission:^(BOOL granted) { if(granted) { //录音部分开始 AudioStreamBasicDescription _recordFormat; bzero(&_recordFormat, sizeof(AudioStreamBasicDescription)); _recordFormat.mSampleRate = RecordSam; _recordFormat.mFormatID = kAudioFormatLinearPCM; _recordFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked; _recordFormat.mFramesPerPacket = 1; _recordFormat.mChannelsPerFrame = 1; _recordFormat.mBitsPerChannel = 16; _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame = (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame; AudioQueueNewInput(&_recordFormat, inputBufferHandler, (__bridge void *)(self), NULL, NULL, 0, &_recordQueue); int bufferByteSize = ceil(ProcessPeo * _recordFormat.mSampleRate) * _recordFormat.mBytesPerFrame; for (int i = 0; i < 1; i++){ AudioQueueAllocateBuffer(_recordQueue, bufferByteSize, &_recBuffers); AudioQueueEnqueueBuffer(_recordQueue, _recBuffers, 0, NULL); } AudioQueueStart(_recordQueue, NULL); //录音部分结束 } else{ //移动到上面处理 } }];录音的启动了,现在开始放音的//播放部分开始 AudioStreamBasicDescription audioFormat; bzero(&audioFormat, sizeof(AudioStreamBasicDescription)); audioFormat.mSampleRate = PlayBaSam; audioFormat.mFormatID = kAudioFormatLinearPCM; audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked; audioFormat.mFramesPerPacket = 1; audioFormat.mChannelsPerFrame = 1; audioFormat.mBitsPerChannel = 16; audioFormat.mBytesPerPacket = audioFormat.mBytesPerFrame = (audioFormat.mBitsPerChannel / 8) * audioFormat.mChannelsPerFrame; AudioQueueNewOutput(&audioFormat,outputBufferHandler, (__bridge void *)(self), NULL,NULL, 0, &_playQueue); int bufferByteSize = ceil(ProcessPeo * audioFormat.mSampleRate) * audioFormat.mBytesPerFrame; //上面的乘法是准备了缓冲区的秒数 我这里用了0.03秒,缓冲区越大延迟会越高 //下面开始创建缓冲区 for(int i=0;i<2;i++) { 
AudioQueueAllocateBuffer(_playQueue, bufferByteSize, &_playBuffers); _playBuffers->mAudioDataByteSize=bufferByteSize; outputBufferHandler(nil,_playQueue,_playBuffers); } AudioQueueStart(_playQueue, NULL);这样录音和播放部分就开始了。注意:AudioQueue对录音得到的数据的处理和提供播放数据都在回调里实现(和Android不同,属于被动的)。回调:真正实现录音和播放。首先实现录音的回调:void inputBufferHandler(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime,UInt32 inNumPackets, const AudioStreamPacketDescription *inPacketDesc){ if (inNumPackets > 0) { /* 数据在inBuffer->mAudioData 数据大小:inNumPackets */ } AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL); /* 这里准备缓冲区 继续执行下去 */ }播放回调:static void outputBufferHandler(void *inUserData,AudioQueueRef inAQ,AudioQueueBufferRef buffer){ uint error=0; /* 填充buffer->mAudioData,大小是缓冲区大小 */ AudioQueueEnqueueBuffer(inAQ, buffer, 0, NULL);}特别注意:播放回调中必须一直填充缓冲区数据,否则播放会被自动停止。这样下来,一个简单的VoIP语音收发流程就实现了。PS. 刚学Objective-C 不到1个月,如果文章中有问题,欢迎批评指正!
|
|
发帖前要善用【论坛搜索】功能,那里可能会有你要找的答案或者已经有人发布过相同内容了,请勿重复发帖。 |
|
|
|
|