29,027
社区成员
发帖
与我相关
我的任务
分享
// Build the audio AVAssetWriterInput (AAC output settings).
AudioChannelLayout channelLayout;
memset(&channelLayout, 0, sizeof(AudioChannelLayout));
// NOTE(review): layout is hard-coded stereo while the channel count below is
// the variable _src_channel_count_pcm — confirm they always agree.
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;
// Modern literal syntax; sample rate boxed as double because Core Audio
// sample rates are Float64.
NSDictionary *outputSettings = @{
    AVFormatIDKey : @(kAudioFormatMPEG4AAC),
    AVNumberOfChannelsKey : @(_src_channel_count_pcm),
    AVSampleRateKey : @((double)_src_sample_rate_pcm),
    AVChannelLayoutKey : [NSData dataWithBytes:&channelLayout length:sizeof(AudioChannelLayout)],
    // AVEncoderBitRateKey : @128000,
};
assetWriterInput_audio = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio outputSettings:outputSettings];
// Initialise the PCM -> AAC converter.
// Source format: packed, interleaved signed-integer linear PCM described by
// the _src_* / _per_sample_bytes_pcm ivars.
memset(&_srcPcmFormat, 0, sizeof(_srcPcmFormat));
_srcPcmFormat.mFormatID = kAudioFormatLinearPCM;
_srcPcmFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
_srcPcmFormat.mBitsPerChannel = _per_sample_bytes_pcm << 3;  // bytes -> bits
_srcPcmFormat.mFramesPerPacket = 1;                          // linear PCM: 1 frame per packet
_srcPcmFormat.mChannelsPerFrame = _src_channel_count_pcm;
_srcPcmFormat.mSampleRate = _src_sample_rate_pcm;
_srcPcmFormat.mBytesPerPacket
    = _srcPcmFormat.mBytesPerFrame = (_srcPcmFormat.mBitsPerChannel >> 3) * _srcPcmFormat.mChannelsPerFrame;

// Destination format: AAC-LC. The original set kMPEG4Object_AAC_SSR, an
// object type Apple's AAC encoders do not provide — LC is the profile used
// with AudioConverter/AVAssetWriter AAC encoding.
memset(&_dstAACFormat, 0, sizeof(_dstAACFormat));
_dstAACFormat.mFormatID = kAudioFormatMPEG4AAC;
_dstAACFormat.mSampleRate = _srcPcmFormat.mSampleRate;
_dstAACFormat.mChannelsPerFrame = _srcPcmFormat.mChannelsPerFrame;
_dstAACFormat.mFormatFlags = kMPEG4Object_AAC_LC;
_dstAACFormat.mFramesPerPacket = 1024;  // AAC packs 1024 frames per packet
_writerAACFormat = _dstAACFormat;

// Let Core Audio fill in the remaining fields of the AAC ASBD.
OSStatus status;
UInt32 size = sizeof(_dstAACFormat);
status = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &_dstAACFormat);
if (status != noErr) {
    NSLog(@"initConverterPCMToAAC AudioFormatGetProperty kAudioFormatProperty_FormatInfo error:%d", (int)status);
    return false;
}

// Prefer the software encoder, then fall back to the hardware one. (The
// original retried with the *same* software manufacturer, so the fallback
// was a no-op.)
AudioClassDescription *description = getAudioClassDescriptionWithType(_dstAACFormat.mFormatID, kAppleSoftwareAudioCodecManufacturer);
if (!description) {
    description = getAudioClassDescriptionWithType(_dstAACFormat.mFormatID, kAppleHardwareAudioCodecManufacturer);
}
if (!description) {
    NSLog(@"initConverterPCMToAAC get audio class description error.");
    return false;
}

// Create the converter, then reset it. (The original called
// AudioConverterReset *before* AudioConverterNewSpecific, i.e. on an
// uninitialised/stale handle.)
status = AudioConverterNewSpecific(&_srcPcmFormat, &_dstAACFormat, 1, description, &_audioConverterPCMToAAC);
if (status != noErr) {
    NSLog(@"initConverterPCMToAAC create audio converter error:%d", (int)status);
    return false;
}
AudioConverterReset(_audioConverterPCMToAAC);

// Output scratch buffer; one PCM frame's worth of bytes is an upper bound
// for a single compressed AAC packet here.
_dstAACBufferSize = _per_frame_bytes_pcm;
_dstAACBuffer = malloc(_dstAACBufferSize);
if (!_dstAACBuffer) {
    NSLog(@"initConverterPCMToAAC output buffer allocation failed");
    return false;
}
// Convert one chunk of PCM to a single AAC packet, then wrap it in a
// CMSampleBuffer via sampleBufferFromAACData.
if (!_audioConverterPCMToAAC) return nil;
OSStatus status = noErr;

// Input-side state consumed by the converter's input callback.
FillComplexInputParam userParam;
userParam.source = srcPCMBuffer;
userParam.sourceSize = srcBufferSize;
userParam.channelCount = _srcPcmFormat.mChannelsPerFrame;
userParam.packetDescriptions = NULL;

// Output buffer list pointing at the preallocated scratch buffer.
memset(_dstAACBuffer, 0, _dstAACBufferSize);
AudioBufferList outAudioBufferList = {0};
outAudioBufferList.mNumberBuffers = 1;
outAudioBufferList.mBuffers[0].mNumberChannels = _dstAACFormat.mChannelsPerFrame;
outAudioBufferList.mBuffers[0].mDataByteSize = _dstAACBufferSize;
outAudioBufferList.mBuffers[0].mData = _dstAACBuffer;

UInt32 ioOutputDataPacketSize = 1;  // request exactly one AAC packet
// Single packet description on the stack. The original malloc'd it and
// never freed it — one AudioStreamPacketDescription leaked per call.
AudioStreamPacketDescription outPacketDescription = {0};
status = AudioConverterFillComplexBuffer(_audioConverterPCMToAAC,
                                         audioConverterComplexInputDataProc,
                                         &userParam,
                                         &ioOutputDataPacketSize,
                                         &outAudioBufferList,
                                         &outPacketDescription);
if (status == noErr) {
    // NOTE(review): the writer format's per-packet size is mutated here and
    // read immediately by sampleBufferFromAACData — confirm no other reader.
    _writerAACFormat.mBytesPerPacket = outPacketDescription.mDataByteSize;
    return [self sampleBufferFromAACData:outAudioBufferList.mBuffers[0].mData
                                 srcSize:outAudioBufferList.mBuffers[0].mDataByteSize
                               timestamp:timestamp];
}
// Wrap encoded AAC bytes (src/srcSize) in a CMSampleBuffer for AVAssetWriter.
CMBlockBufferRef audioBlockBuf = NULL;
CMSampleBufferRef audioSampleBuf = NULL;
OSStatus result;

// Allocate a block buffer and copy the AAC payload into it.
result = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, NULL, srcSize,
                                            kCFAllocatorDefault, NULL, 0, srcSize,
                                            kCMBlockBufferAssureMemoryNowFlag, &audioBlockBuf);
if (result != kCMBlockBufferNoErr) {
    NSLog(@"sampleBufferFromAACData failed to create block buffer:%d\n", (int)result);
    return NULL;
}
result = CMBlockBufferReplaceDataBytes(src, audioBlockBuf, 0, srcSize);

// Create the format description on EVERY call. The original cached it in a
// function-level `static` on first use, but _writerAACFormat.mBytesPerPacket
// is rewritten for each packet, so the cached description went stale and no
// longer matched the samples being appended — the likely cause of the
// appendSampleBuffer failure reported downstream.
CMAudioFormatDescriptionRef audioFormatDes = NULL;
result = CMAudioFormatDescriptionCreate(kCFAllocatorDefault,
                                        &_writerAACFormat,
                                        0, NULL,   // no channel layout
                                        0, NULL,   // no magic cookie
                                        NULL,      // no extensions
                                        &audioFormatDes);

result = CMAudioSampleBufferCreateWithPacketDescriptions(
    kCFAllocatorDefault, audioBlockBuf, TRUE, 0, NULL, audioFormatDes, 1, timestamp, NULL, &audioSampleBuf);
if (result != noErr) {
    NSLog(@"sampleBufferFromAACData Failed to create aac samplebuffer.\n");
}
// The sample buffer retains what it needs; drop our references.
if (audioFormatDes) CFRelease(audioFormatDes);
CFRelease(audioBlockBuf);
// Encode this PCM chunk and append the resulting sample to the writer input.
// NOTE(review): the audio PTS is derived from the *video* fps counters
// (encodeFrames * _video_fps_num / _video_fps_den) — confirm this matches
// the audio timeline.
CMTime time = CMTimeMake(encodeFrames * _video_fps_num, _video_fps_den);
CMSampleBufferRef sampleBufRef = [self convertPCMToAAC:samples srcBufferSize:samplesSize timestamp:time];
if (sampleBufRef) {
    // Wait until the input can accept data BEFORE appending. The original
    // appended first and only waited afterwards, so appendSampleBuffer could
    // run while readyForMoreMediaData was NO — a documented failure mode of
    // AVAssetWriterInput (and the error reported at this call site).
    while (!audioInput.readyForMoreMediaData) {
        [NSThread sleepForTimeInterval:0.01];
    }
    BOOL result = [audioInput appendSampleBuffer:sampleBufRef];
    if (!result) {
        NSLog(@"Failed to append sample to audio,%@,%@\n", [assetWriter.error domain], [assetWriter.error localizedDescription]);
        CFRelease(sampleBufRef);  // original leaked the buffer on this path
        break;
    }
    CFRelease(sampleBufRef);
}