Add a simple sample 'rtsp_player', which plays video on iOS devices given an RTSP link.
diff --git a/samples/rtsp_player/.DS_Store b/samples/rtsp_player/.DS_Store
new file mode 100644
index 0000000..d4ad6ea
--- /dev/null
+++ b/samples/rtsp_player/.DS_Store
Binary files differ
diff --git a/samples/rtsp_player/Classes/DisplayView.h b/samples/rtsp_player/Classes/DisplayView.h
new file mode 100755
index 0000000..b30107a
--- /dev/null
+++ b/samples/rtsp_player/Classes/DisplayView.h
@@ -0,0 +1,28 @@
+//
+// DisplayView.h
+// iPlayer
+//
+// Created by Gu Update on 09-7-15.
+// Copyright 2009 __MyCompanyName__. All rights reserved.
+//
+
+#import <UIKit/UIKit.h>
+
+
+@interface DisplayView : UIView {
+@protected
+ CGContextRef _context;
+ CGImageRef _image;
+ BOOL _bFull;
+ BOOL _bHeng;
+}
+
+@property (nonatomic,readonly) CGContextRef context;
+@property (nonatomic,readonly) CGImageRef image;
+@property (nonatomic) BOOL fullScreen;
+@property (nonatomic) BOOL bHeng;
+
+- (void)setCGImage:(CGImageRef)cgimg;
+- (void)saveImage;
+
+@end
diff --git a/samples/rtsp_player/Classes/DisplayView.m b/samples/rtsp_player/Classes/DisplayView.m
new file mode 100755
index 0000000..d744947
--- /dev/null
+++ b/samples/rtsp_player/Classes/DisplayView.m
@@ -0,0 +1,168 @@
+//
+// DisplayView.m
+// iPlayer
+//
+// Created by Gu Update on 09-7-15.
+// Copyright 2009 __MyCompanyName__. All rights reserved.
+//
+
+#import "DisplayView.h"
+#import <mach/mach_time.h>
+
+@implementation DisplayView
+@synthesize context=_context,image=_image,fullScreen=_bFull,bHeng=_bHeng;
+NSCondition * imageCondition;//用于同步_image的锁
+- (id)initWithFrame:(CGRect)frame {
+ if (self = [super initWithFrame:frame]) {
+ // Initialization code
+ _context=nil;
+ _image=nil;
+ _bFull=YES;
+ _bHeng=NO;
+ imageCondition=[[NSCondition alloc]init];
+ }
+ return self;
+}
+
+- (id)initWithCoder:(NSCoder *)decoder
+{
+ if (self = [super initWithCoder:decoder]) {
+ // Initialization code
+ _context=nil;
+ _image=nil;
+ _bFull=YES;
+ _bHeng=NO;
+ imageCondition=[[NSCondition alloc]init];
+ }
+ return self;
+}
+
+- (void)drawRect:(CGRect)rect {
+ // Drawing code
+ _context=UIGraphicsGetCurrentContext();
+ if (_image == NULL) {
+#if 1
+ UIImage *defaultScreen = [UIImage imageNamed:@"default_screen_black.png"];
+#else
+ UIImage *defaultScreen = [UIImage imageNamed:@"bxs_display.png"];
+#endif
+ _image = CGImageRetain(defaultScreen.CGImage);
+ }
+ if(_image)
+ {
+ short iw=CGImageGetWidth(_image),ih=CGImageGetHeight(_image);
+ short sw=rect.size.width,sh=rect.size.height;
+ short w=iw,h=ih;
+
+ if(sw==0 || sh==0 || iw==0 || ih==0)
+ return;
+ //*
+ if(_bHeng)
+ {
+ if (_bFull) {
+ w = sw;
+ h = sh;
+ } else {
+ if(iw*sh>=sw*ih)
+ {
+ w=sw;
+ h=w*ih/iw;
+ }
+ else
+ {
+ h=sh;
+ w=h*iw/ih;
+ }
+ }
+
+ }
+ else
+ {
+ if(_bFull)
+ {
+ if(iw*sh>=sw*ih)
+ {
+ w=sw;
+ h=sh;//w*ih/iw;
+ }
+ else
+ {
+ h=sh;
+ w=sw;//h*iw/ih;
+ }
+ }
+ }
+ /*同步_image对象*/
+ [imageCondition lock];
+ if (_image)
+ {
+ CGContextDrawImage(_context, CGRectMake((sw-w)/2, (sh-h)/2, w, h), _image);
+ }
+ [imageCondition unlock];
+
+#if 0
+ /* 测试用,显示帧数 */
+ CGAffineTransform af = CGContextGetCTM(_context);
+ static mach_timebase_info_data_t time_info;
+ mach_timebase_info(&time_info);
+ static uint64_t lastTimer;
+ uint64_t now = mach_absolute_time();
+ int fps = 1000 / ((now - lastTimer) * time_info.numer/ time_info.denom /1000000);
+ lastTimer = now;
+ [[UIColor redColor] set];
+ static int i = 0;
+ NSString *s = [NSString stringWithFormat:@"F: %d FPS:%d", ++i, fps];
+ CGContextConcatCTM(_context, af);
+ [s drawAtPoint:CGPointMake(0, 30) withFont:[UIFont systemFontOfSize:40]];
+#endif
+
+ }
+}
+
+- (void)setCGImage:(CGImageRef)cgimg
+{
+ @synchronized(self)
+ {
+ [imageCondition lock];
+ if(_image)
+ {
+ CGImageRelease(_image);
+ }
+ _image=cgimg;
+ [imageCondition unlock];
+ }
+ //[self setNeedsDisplay];
+}
+
+- (void)saveImage
+{
+ [imageCondition lock];
+ UIImage* img=nil;
+ if (_image) {
+ img=[[UIImage alloc] initWithCGImage:_image];
+ }
+ [imageCondition unlock];
+ if(img)
+ {
+ UIImageWriteToSavedPhotosAlbum(img , nil, nil, nil);
+ [img release];
+ }
+}
+
+
+- (void)dealloc {
+ @synchronized(self)
+ {
+ [imageCondition lock];
+ if(_image)
+ {
+ CGImageRelease(_image);
+ }
+ [imageCondition unlock];
+ }
+ [imageCondition release];
+ [super dealloc];
+}
+
+
+@end
diff --git a/samples/rtsp_player/Classes/H264/VideoDecode.cpp b/samples/rtsp_player/Classes/H264/VideoDecode.cpp
new file mode 100755
index 0000000..982f548
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/VideoDecode.cpp
@@ -0,0 +1,626 @@
+// VideoDecode.cpp: implementation of the CVideoDecode class.
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "VideoDecode.h"
+
+
+//////////////////////////////////////////////////////////////////////
+// Construction/Destruction
+//////////////////////////////////////////////////////////////////////
+/*************************************************
+Function: CVideoDecode
+Description: constructor
+Parameters: none
+Return: none
+use: CFilePlayer
+Call:
+Called By:
+IO:
+Last Modified: 2009-04-21
+Others:
+*************************************************/
+#ifndef SAFE_DELETE
+#define SAFE_DELETE(p) {if(p != NULL) { delete (p); (p) = NULL; } } //Delete object by New create
+#endif
+
+#ifndef SAFE_DELETEA
+#define SAFE_DELETEA(p) {if(p != NULL) { delete[] (p); (p) = NULL; } } //Delete Arrary
+#endif
+
+#ifndef CheckPointer
+#define CheckPointer(pointer,hr) {if(pointer == NULL) return hr;};
+#endif
+
+#define TRUE 1
+#define FALSE 0
+
+
+void SetSupportIFrame(PCVIDEO_DECODE pvideo, int bSupportIFrame)
+{
+ pvideo->m_bSupportIFrame = bSupportIFrame;
+}
+
+/*************************************************
+Function: UnInitDecode
+Description: de-initializes (resets) the video decoder
+Parameters: none
+Return: int, TRUE on success, any other value on failure
+use:
+ m_pFilePlayer u
+Call:
+Called By: ~CVideoDecode()
+IO:
+Last Modified: 2009-04-21
+Others:
+*************************************************/
+int UnInitDecode(PCVIDEO_DECODE pvideo, int videoFormat)
+{
+ pvideo->m_nVideoFormat = videoFormat;//PIX_FMT_YUV420P;
+ pvideo->m_pAVCodecContext = NULL;
+ pvideo->m_pSwsContext = NULL;
+ pvideo->m_pPicture = NULL;
+ pvideo->m_pFrameYUV = NULL;
+ pvideo->m_pAVCodec = NULL;
+ pvideo->m_pVideoBuffer = NULL;
+ pvideo->m_bInitSuccess = 0;
+ pvideo->m_bSupportIFrame = 1;
+ pvideo->m_bInitSuccess = 0;
+
+#if 0
+ SAFE_DELETEA(pvideo->m_pVideoBuffer);
+ if (pvideo->m_pAVCodecContext)
+ {
+ avcodec_close(pvideo->m_pAVCodecContext);
+ av_free(pvideo->m_pAVCodecContext);
+ pvideo->m_pAVCodecContext = NULL;
+ }
+ if(pvideo->m_pPicture)
+ {
+ av_free(pvideo->m_pPicture);
+ pvideo->m_pPicture = NULL;
+ }
+#else
+ SAFE_DELETEA(pvideo->m_pVideoBuffer);
+
+ if(pvideo->m_pAVCodecContext)
+ {
+ avcodec_close(pvideo->m_pAVCodecContext);
+ av_free(pvideo->m_pAVCodecContext);
+ pvideo->m_pAVCodecContext = NULL;
+ }
+
+ if(pvideo->m_pSwsContext)
+ {
+ sws_freeContext(pvideo->m_pSwsContext);
+ pvideo->m_pSwsContext = NULL;
+ }
+
+ if(pvideo->m_pPicture)
+ {
+ av_free(pvideo->m_pPicture);
+ pvideo->m_pPicture = NULL;
+ }
+
+ if(pvideo->m_pFrameYUV)
+ {
+ av_free(pvideo->m_pFrameYUV);
+ pvideo->m_pFrameYUV = NULL;
+ }
+#endif
+ return TRUE;
+}
+
+/*************************************************
+Function: InitDecode
+Description: initializes the video decoder
+Parameters: nVideoWidth in width of the video image
+ nVideoHeight in height of the video image
+ nDecCodeID in decoder (codec) type
+ nVideoFormat in pixel format of the video image
+
+Return: int, TRUE on success, any other value on failure
+use:
+ m_pFilePlayer u
+Call:
+Called By:
+IO:
+Last Modified: 2009-04-21
+Others:
+*************************************************/
+int InitDecode(PCVIDEO_DECODE pvideo, int nVideoWidth,int nVideoHeight,enum CodecID emDecCodeID,int nVideoFormat)
+{
+
+ UnInitDecode (pvideo, nVideoFormat);
+
+#if 0
+ avcodec_init();
+ avcodec_register_all();
+
+ av_init_packet(&(pvideo->avpkt));
+ pvideo->m_pAVCodec = avcodec_find_decoder(emDecCodeID);
+ if(!(pvideo->m_pAVCodec))
+ {
+ fprintf(stderr, "Codec not found.\n");
+ return -1;
+ }
+
+ pvideo->m_pAVCodecContext = avcodec_alloc_context();
+ pvideo->m_pPicture = avcodec_alloc_frame();
+ pvideo->m_pAVCodecContext->coded_height = nVideoWidth;
+ pvideo->m_pAVCodecContext->coded_width = nVideoHeight;
+ pvideo->m_pAVCodecContext->width = nVideoWidth;
+ pvideo->m_pAVCodecContext->height = nVideoHeight;
+ if(pvideo->m_pAVCodec->capabilities&CODEC_CAP_TRUNCATED)
+ pvideo->m_pAVCodecContext->flags|= CODEC_FLAG_TRUNCATED;
+
+ if (avcodec_open(pvideo->m_pAVCodecContext, pvideo->m_pAVCodec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ return FALSE;
+ }
+
+ if (pvideo->m_pVideoBuffer == NULL)
+ pvideo->m_pVideoBuffer = new uint8_t[nVideoWidth * nVideoHeight * 3 / 2];
+ if(pvideo->m_pVideoBuffer == NULL)
+ {
+ return FALSE;
+ }
+
+// if(avpicture_fill((AVPicture *)m_pFrameYUV,m_pVideoBuffer, (PixelFormat)nVideoFormat,nVideoWidth, nVideoHeight) < 0)
+// {
+// return FALSE;
+// }
+
+ pvideo->m_bInitSuccess = TRUE;
+
+ return TRUE;
+#else
+ pvideo->m_nVideoFormat = nVideoFormat;
+ pvideo->m_pAVCodecContext = NULL;
+
+ avcodec_init(); // initialize the codec library
+ avcodec_register_all(); // register all codecs
+
+ // e.g. CODEC_ID_H264, CODEC_ID_MPEG4, CODEC_ID_MPEG2VIDEO, CODEC_ID_MJPEG, etc.
+ pvideo->m_pAVCodec = avcodec_find_decoder(emDecCodeID);
+ if (pvideo->m_pAVCodec == NULL)
+ {
+ return FALSE;
+ }
+
+ pvideo->m_pAVCodecContext = avcodec_alloc_context();
+
+ pvideo->m_pPicture = avcodec_alloc_frame();
+ pvideo->m_pFrameYUV = avcodec_alloc_frame();
+
+ if(pvideo->m_pAVCodecContext == NULL)
+ {
+ return FALSE;
+ }
+
+ if(pvideo->m_pFrameYUV == NULL || pvideo->m_pPicture == NULL)
+ {
+ return FALSE;
+ }
+
+ // ƒ¨»œ≈‰÷√H264 CODEC CONTEXT–≈œ¢
+ pvideo->m_pAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
+ pvideo->m_pAVCodecContext->codec_id = emDecCodeID;
+ pvideo->m_pAVCodecContext->width = nVideoWidth;
+ pvideo->m_pAVCodecContext->height = nVideoHeight;
+ pvideo->m_pAVCodecContext->pix_fmt = (PixelFormat)nVideoFormat;
+
+ int nRes = 0;
+ if ((nRes = avcodec_open(pvideo->m_pAVCodecContext, pvideo->m_pAVCodec)) < 0)
+ {
+ pvideo->m_pAVCodecContext = NULL;
+
+ return FALSE;
+ }
+
+ if (pvideo->m_pVideoBuffer == NULL)
+ pvideo->m_pVideoBuffer = new uint8_t[nVideoWidth * nVideoHeight * 3 / 2];
+ if(pvideo->m_pVideoBuffer == NULL)
+ {
+ return FALSE;
+ }
+
+ if(avpicture_fill((AVPicture *)pvideo->m_pFrameYUV,pvideo->m_pVideoBuffer, (PixelFormat)nVideoFormat,nVideoWidth, nVideoHeight) < 0)
+ {
+ return FALSE;
+ }
+ pvideo->m_bInitSuccess = TRUE;
+#endif
+ return TRUE;
+}
+
+/*************************************************
+Function: AVDecode
+Description: decodes compressed video data
+Parameters: pInputBuffer in compressed video data to be decoded
+ nInputSize in length of the input data
+ ppOutputBuffer out buffer receiving the decoded raw video data
+ bKeyFrame out whether the decoded frame is a key frame
+
+Return: int, TRUE on success, any other value on failure
+use:
+ m_pFilePlayer u
+Call:
+Called By:
+IO:
+Last Modified: 2009-04-21
+Others:
+*************************************************/
+int AVDecode(PCVIDEO_DECODE pvideo, unsigned char *pInputBuffer,int nInputSize,unsigned char **ppOutputBuffer,int* bKeyFrame)
+{
+ // validate the compressed input data
+ CheckPointer(pInputBuffer,FALSE);
+ if(nInputSize <= 0)
+ return FALSE;
+
+ // decoding is not allowed until initialization has completed
+ if(!(pvideo->m_bInitSuccess))
+ return FALSE;
+
+#if 0
+ int got_picture;
+ int ret;
+ int i, j;
+ pvideo->avpkt.size = nInputSize;
+ pvideo->avpkt.data = pInputBuffer;
+ int width = pvideo->m_pAVCodecContext->width;
+ int height = pvideo->m_pAVCodecContext->height;
+ while(pvideo->avpkt.size > 0)
+ {
+ int len = avcodec_decode_video2(pvideo->m_pAVCodecContext, pvideo->m_pPicture, &got_picture, &(pvideo->avpkt));
+ if (len <= 0) {
+ return FALSE;
+ }
+ if (got_picture) {
+ uint8_t* pp = pvideo->m_pPicture->data[0] + 0*(width+32) + 16;
+ uint8_t* pu = pvideo->m_pPicture->data[1] + 0*(width/2+16) + 8;
+ uint8_t* pv = pvideo->m_pPicture->data[2] + 0*(width/2+16) + 8;
+// printf("%d %d %d\n", pvideo->m_pPicture->linesize[0], pvideo->m_pPicture->linesize[1], pvideo->m_pPicture->linesize[2]);
+ unsigned char *outBuf = new unsigned char[width*height*4];
+ for (j =0; j<height; j++)
+ {
+ memcpy(outBuf+j*width, pp+j*(width+32), width);
+ if (!(j&1))
+ {
+ memcpy(outBuf+width*height+(j/2)*(width/2), pu+(j/2)*(width/2+16), width/2);
+ memcpy(outBuf+width*height*5/4+(j/2)*(width/2), pv+(j/2)*(width/2+16), width/2);
+ }
+ }
+ //memcpy(*ppOutputBuffer, outBuf, width*height*3/2);
+ //delete [] outBuf;
+ *ppOutputBuffer = outBuf;
+ }
+ pvideo->avpkt.size -= len;
+ pvideo->avpkt.data += len;
+ }
+ if(pvideo->m_bSupportIFrame)
+ {
+ // key frame
+ *bKeyFrame = pvideo->m_pAVCodecContext->coded_frame->key_frame;
+ }
+ else
+ {
+ *bKeyFrame = TRUE;
+ }
+
+ return TRUE;
+#else
+
+ int nRes = 0;
+ pvideo->avpkt.size = nInputSize;
+ pvideo->avpkt.data = pInputBuffer;
+
+ if ((nRes = avcodec_decode_video2(pvideo->m_pAVCodecContext, pvideo->m_pPicture, &(pvideo->m_nGetPicture),&(pvideo->avpkt))) <= 0)
+ {
+ return FALSE;
+ }
+ if (pvideo->m_nGetPicture)
+ {
+ if(pvideo->m_pSwsContext == NULL)
+ {
+ pvideo->m_pSwsContext = sws_getContext( pvideo->m_pAVCodecContext->width,
+ pvideo->m_pAVCodecContext->height,
+ pvideo->m_pAVCodecContext->pix_fmt,
+ pvideo->m_pAVCodecContext->width,
+ pvideo->m_pAVCodecContext->height,
+ (PixelFormat)pvideo->m_nVideoFormat,
+ /*SWS_FAST_BILINEAR*/SWS_BICUBIC,
+ NULL, NULL, NULL );
+ CheckPointer(pvideo->m_pSwsContext,FALSE);
+ }
+
+ sws_scale(pvideo->m_pSwsContext,pvideo->m_pPicture->data,
+ pvideo->m_pPicture->linesize,
+ 0,
+ pvideo->m_pAVCodecContext->height,
+ pvideo->m_pFrameYUV->data,
+ pvideo->m_pFrameYUV->linesize);
+
+ *ppOutputBuffer = pvideo->m_pFrameYUV->data[0];
+
+
+ //AVPicture outpicture;
+ //outpicture.data[0] = * ppOutputBuffer;
+ //outpicture.linesize[0] = ((AVCodecContext *) (pvideo->m_pAVCodecContext))->width*4;
+ //YUV2RGBFUNC(yuv2rgb_c_32, long, 0);
+
+ if(pvideo->m_bSupportIFrame)
+ {
+ // key frame
+ *bKeyFrame = 1;//pvideo->m_pAVCodecContext->coded_frame->key_frame;
+ }
+ else
+ {
+ *bKeyFrame = TRUE;
+ }
+
+ return TRUE;
+ }
+
+
+#endif
+
+ return FALSE;
+}
+
+// Convert from RGB to YUV420
+int RGB2YUV_YR[256], RGB2YUV_YG[256], RGB2YUV_YB[256];
+int RGB2YUV_UR[256], RGB2YUV_UG[256], RGB2YUV_UBVR[256];
+int RGB2YUV_VG[256], RGB2YUV_VB[256];
+// Conversion from YUV420 to RGB24
+static long int crv_tab[256];
+static long int cbu_tab[256];
+static long int cgu_tab[256];
+static long int cgv_tab[256];
+static long int tab_76309[256];
+static unsigned char clp[1024]; //for clip in CCIR601
+
+//
+// Table used for RGB to YUV420 conversion
+//
+void InitLookupTable()
+{
+ int i;
+ for (i = 0; i < 256; i++) RGB2YUV_YR[i] = (float)65.481 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_YG[i] = (float)128.553 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_YB[i] = (float)24.966 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_UR[i] = (float)37.797 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_UG[i] = (float)74.203 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_VG[i] = (float)93.786 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_VB[i] = (float)18.214 * (i<<8);
+ for (i = 0; i < 256; i++) RGB2YUV_UBVR[i] = (float)112 * (i<<8);
+}
+
+//
+//Initialize conversion table for YUV420 to RGB
+//
+void InitConvertTable()
+{
+ long int crv,cbu,cgu,cgv;
+ int i,ind;
+
+ crv = 104597; cbu = 132201; /* fra matrise i global.h */
+ cgu = 25675; cgv = 53279;
+
+ for (i = 0; i < 256; i++) {
+ crv_tab[i] = (i-128) * crv;
+ cbu_tab[i] = (i-128) * cbu;
+ cgu_tab[i] = (i-128) * cgu;
+ cgv_tab[i] = (i-128) * cgv;
+ tab_76309[i] = 76309*(i-16);
+ }
+
+ for (i=0; i<384; i++)
+ clp[i] =0;
+ ind=384;
+ for (i=0;i<256; i++)
+ clp[ind++]=i;
+ ind=640;
+ for (i=0;i<384;i++)
+ clp[ind++]=255;
+}
+
+
+
+void ConvertYUVtoRGB(unsigned char *src0,unsigned char *src1,unsigned char *src2,unsigned char *dst_ori,int width, int height)
+{
+ extern long int crv_tab[];
+ extern long int cbu_tab[];
+ extern long int cgu_tab[];
+ extern long int cgv_tab[];
+ extern long int tab_76309[];
+ int y11, y21;
+ int y12, y22;
+ int y13, y23;
+ int y14, y24;
+ int u, v;
+ int i, j;
+ int c11, c21, c31, c41;
+ int c12, c22, c32, c42;
+ unsigned int DW;
+ unsigned int *id1, *id2;
+ unsigned char *py1, *py2, *pu, *pv;
+ unsigned char *d1, *d2;
+
+ d1 = dst_ori;
+ d1 += width * height * 3 - width * 3;
+ d2 = d1 - width * 3;
+
+ py1 = src0;
+ pu = src1;
+ pv = src2;
+ py2 = py1 + width;
+
+ id1 = (unsigned int *) d1;
+ id2 = (unsigned int *) d2;
+ for (j = 0; j < height; j += 2)
+
+
+ {
+ /* line j + 0 */
+ for (i = 0; i < width; i += 4)
+ {
+ u = *pu++;
+ v = *pv++;
+ c11 = crv_tab[v];
+ c21 = cgu_tab[u];
+ c31 = cgv_tab[v];
+ c41 = cbu_tab[u];
+ u = *pu++;
+ v = *pv++;
+ c12 = crv_tab[v];
+ c22 = cgu_tab[u];
+ c32 = cgv_tab[v];
+ c42 = cbu_tab[u];
+
+ y11 = tab_76309[*py1++]; /* (255/219)*65536 */
+ y12 = tab_76309[*py1++];
+ y13 = tab_76309[*py1++]; /* (255/219)*65536 */
+ y14 = tab_76309[*py1++];
+
+ y21 = tab_76309[*py2++];
+ y22 = tab_76309[*py2++];
+ y23 = tab_76309[*py2++];
+ y24 = tab_76309[*py2++];
+
+ /* RGBR */
+ DW = ((clp[(y11 + c41) >> 16])) |
+ ((clp[(y11 - c21 - c31) >> 16]) << 8) |
+ ((clp[(y11 + c11) >> 16]) << 16) |
+ ((clp[(y12 + c41) >> 16]) << 24);
+ *id1++ = DW;
+
+ /* GBRG */
+ DW = ((clp[(y12 - c21 - c31) >> 16])) |
+ ((clp[(y12 + c11) >> 16]) << 8) |
+ ((clp[(y13 + c42) >> 16]) << 16) |
+ ((clp[(y13 - c22 - c32) >> 16]) << 24);
+ *id1++ = DW;
+
+ /* BRGB */
+ DW = ((clp[(y13 + c12) >> 16])) |
+ ((clp[(y14 + c42) >> 16]) << 8) |
+ ((clp[(y14 - c22 - c32) >> 16]) << 16) |
+ ((clp[(y14 + c12) >> 16]) << 24);
+ *id1++ = DW;
+
+ /* RGBR */
+ DW = ((clp[(y21 + c41) >> 16])) |
+ ((clp[(y21 - c21 - c31) >> 16]) << 8) |
+ ((clp[(y21 + c11) >> 16]) << 16) |
+ ((clp[(y22 + c41) >> 16]) << 24);
+ *id2++ = DW;
+
+ /* GBRG */
+ DW = ((clp[(y22 - c21 - c31) >> 16])) |
+ ((clp[(y22 + c11) >> 16]) << 8) |
+ ((clp[(y23 + c42) >> 16]) << 16) |
+ ((clp[(y23 - c22 - c32) >> 16]) << 24);
+ *id2++ = DW;
+
+ /* BRGB */
+ DW = ((clp[(y23 + c12) >> 16])) |
+ ((clp[(y24 + c42) >> 16]) << 8) |
+ ((clp[(y24 - c22 - c32) >> 16]) << 16) |
+ ((clp[(y24 + c12) >> 16]) << 24);
+ *id2++ = DW;
+ }
+ id1 -= (9 * width) >> 2;
+ id2 -= (9 * width) >> 2;
+ py1 += width;
+ py2 += width;
+ }
+}
+
+#define RGB4Y 1.164
+#define B4U 2.018
+#define Y2ADD 16
+#define G4U 0.391
+#define G4V 0.813
+#define U2ADD 128
+#define R4V 1.596
+#define V2ADD 128
+#define SCALEBITS 13
+#define FIX(x)((int) ((x) * (1L << SCALEBITS) + 0.5))
+
+int g_RGB4Y_Tab[256];
+int g_B4U_Tab[256];
+int g_G4U_Tab[256];
+int g_G4V_Tab[256];
+int g_R4V_Tab[256];
+void InitColorSpace()
+{
+ for (int i = 0; i < 256; i++)
+ {
+ g_RGB4Y_Tab[i]= FIX(RGB4Y) * (i - Y2ADD);
+ g_B4U_Tab[i]= FIX(B4U ) * (i - U2ADD);
+ g_G4U_Tab[i]= FIX(G4U ) * (i - U2ADD);
+ g_G4V_Tab[i]= FIX(G4V ) * (i - V2ADD);
+ g_R4V_Tab[i]= FIX(R4V ) * (i - V2ADD);
+ }
+}
+
+inline unsigned char ClipColorvalue(int x)
+{
+ return x < 0 ? 0 : (x > 255 ? 255 : x);
+}
+
+void YUV420_RGB32_4Pixel(unsigned char * pRGB, unsigned char * pY, unsigned char* pU, unsigned char* pV, int Width)
+{
+ int nRGB4Y = 0;
+ int nB4U = g_B4U_Tab[pU[0]];
+ int nG4UV = g_G4U_Tab[pU[0]] + g_G4V_Tab[pV[0]];
+ int nR4V = g_R4V_Tab[pV[0]];
+
+ // (0, 0)
+ nRGB4Y = g_RGB4Y_Tab[pY[0]];
+ pRGB[0] = ClipColorvalue((nRGB4Y + nB4U ) >> SCALEBITS);
+ pRGB[1] = ClipColorvalue((nRGB4Y - nG4UV) >> SCALEBITS);
+ pRGB[2] = ClipColorvalue((nRGB4Y + nR4V ) >> SCALEBITS);
+ pRGB[3] = 0;
+
+ // (0, 1)
+ nRGB4Y = g_RGB4Y_Tab[pY[1]];
+ pRGB[4] = ClipColorvalue((nRGB4Y + nB4U ) >> SCALEBITS);
+ pRGB[5] = ClipColorvalue((nRGB4Y - nG4UV) >> SCALEBITS);
+ pRGB[6] = ClipColorvalue((nRGB4Y + nR4V ) >> SCALEBITS);
+ pRGB[7] = 0;
+
+ // (1, 0)
+ nRGB4Y = g_RGB4Y_Tab[pY[Width]];
+ pRGB[(Width<<2)+0] = ClipColorvalue((nRGB4Y + nB4U ) >> SCALEBITS);
+ pRGB[(Width<<2)+1] = ClipColorvalue((nRGB4Y - nG4UV) >> SCALEBITS);
+ pRGB[(Width<<2)+2] = ClipColorvalue((nRGB4Y + nR4V ) >> SCALEBITS);
+ pRGB[(Width<<2)+3] = 0;
+
+ // (1, 1)
+ nRGB4Y = g_RGB4Y_Tab[pY[Width+1]];
+ pRGB[(Width<<2)+4] = ClipColorvalue((nRGB4Y + nB4U ) >> SCALEBITS);
+ pRGB[(Width<<2)+5] = ClipColorvalue((nRGB4Y - nG4UV) >> SCALEBITS);
+ pRGB[(Width<<2)+6] = ClipColorvalue((nRGB4Y + nR4V ) >> SCALEBITS);
+ pRGB[(Width<<2)+7] = 0;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+//
+void YUV420_RGB32(unsigned char * pRGB, unsigned char* pYUV, int Width, int Height)
+{
+ unsigned char * pY = pYUV, *pU = pY + Width * Height, *pV = pU + Width * Height/4;
+ //unsigned char * pY = pYUV, *pV = pY + Width * Height, *pU = pV + Width * Height/4;
+ short x, y;
+ for (y = 0; y < Height; y += 2)
+ {
+ for (x = 0; x < Width; x += 2)
+ {
+ YUV420_RGB32_4Pixel(pRGB, pY, pU, pV, Width);
+ pRGB += 8; pY += 2; pU += 1; pV += 1;
+ }
+ pRGB += Width<<2;
+ pY += Width;
+ }
+}
\ No newline at end of file
diff --git a/samples/rtsp_player/Classes/H264/VideoDecode.h b/samples/rtsp_player/Classes/H264/VideoDecode.h
new file mode 100755
index 0000000..b47910d
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/VideoDecode.h
@@ -0,0 +1,70 @@
+/*************************************************
+Copyright (C), 1998-2009
+File name: CVideoDecode.h
+Author: dongchaomissyou
+Version: V1.0
+Date: 2009-05-04
+Description: video decoding class supporting commonly used codecs,
+ such as MPEG4, MPEG2, MJPEG and H264
+*************************************************/
+#if !defined(_VIDEO_DEC_H)
+#define _VIDEO_DEC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "libavformat/avformat.h"
+#include "libavcodec/avcodec.h"
+#include "libswscale/swscale.h"
+#ifdef __cplusplus
+}
+#endif
+
+
+typedef struct CVideoDecode
+{
+
+ AVCodec* m_pAVCodec; // structure defined by ffmpeg
+ AVCodecContext* m_pAVCodecContext; // structure defined by ffmpeg
+ struct SwsContext* m_pSwsContext;
+ AVFrame* m_pPicture; // structure defined by ffmpeg
+ AVFrame* m_pFrameYUV; // structure defined by ffmpeg
+ AVPacket avpkt; //add by wayde
+ uint8_t* m_pVideoBuffer; // decoder-internal buffer that holds the decoded data
+ int m_nGetPicture; // 1 after a frame was decoded successfully, 0 on failure
+ int m_nVideoFormat; // pixel format of the decoded video
+ int m_bSupportIFrame; // whether I-frame (key-frame) reporting is supported
+
+ int m_bInitSuccess; // flag: initialization succeeded
+
+}CVIDEO_DECODE, *PCVIDEO_DECODE;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// ³õʼ»¯½âÂëÆ÷
+int InitDecode(PCVIDEO_DECODE pvideo, int nVideoWidth,int nVideoHeight,enum CodecID emDecCodeID,int nVideoFormat );
+
+// de-initialize the decoder
+int UnInitDecode(PCVIDEO_DECODE pvideo, int videoFormat);
+
+// ½âÂ뺯Êý
+int AVDecode(PCVIDEO_DECODE pvideo, unsigned char *pInputBuffer,int nInputSize,unsigned char **ppOutputBuffer,int* bKeyFrame);
+void SetSupportIFrame(PCVIDEO_DECODE pvideo, int bSupportIFrame);
+
+void InitRgbYuv();
+ void Yuv12ToRgb32(int iWidth, int iHeight, unsigned char * pYuv, unsigned char * pRgb);
+ void Yuv12ToRgb24(int iWidth, int iHeight, unsigned char * pYuv, unsigned char * pRgb);
+ void ConvertYUVtoRGB(unsigned char *src0,unsigned char *src1,unsigned char *src2,unsigned char *dst_ori,int width, int height);
+ void InitConvertTable();
+
+ void YUV420_RGB32(unsigned char * pRGB, unsigned char* pYUV, int Width, int Height);
+ void InitColorSpace();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/samples/rtsp_player/Classes/H264/decodeColorConvert.a b/samples/rtsp_player/Classes/H264/decodeColorConvert.a
new file mode 100755
index 0000000..e299042
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/decodeColorConvert.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/H264/decodeCommon.a b/samples/rtsp_player/Classes/H264/decodeCommon.a
new file mode 100755
index 0000000..cb05397
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/decodeCommon.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/H264/decodeParase.a b/samples/rtsp_player/Classes/H264/decodeParase.a
new file mode 100755
index 0000000..77e42bb
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/decodeParase.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/H264/decodePeripheraDevice.a b/samples/rtsp_player/Classes/H264/decodePeripheraDevice.a
new file mode 100755
index 0000000..74d01fd
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/decodePeripheraDevice.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/H264/decodec.a b/samples/rtsp_player/Classes/H264/decodec.a
new file mode 100755
index 0000000..ffefea3
--- /dev/null
+++ b/samples/rtsp_player/Classes/H264/decodec.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/iPlayerAppDelegate.h b/samples/rtsp_player/Classes/iPlayerAppDelegate.h
new file mode 100755
index 0000000..f9b479c
--- /dev/null
+++ b/samples/rtsp_player/Classes/iPlayerAppDelegate.h
@@ -0,0 +1,21 @@
+//
+// iPlayerAppDelegate.h
+// iPlayer
+//
+// Created by Gu Update on 09-7-15.
+// Copyright __MyCompanyName__ 2009. All rights reserved.
+//
+
+@interface iPlayerAppDelegate : NSObject <UIApplicationDelegate> {
+
+ UIWindow *window;
+ UINavigationController *navigationController;
+}
+
+@property (nonatomic, retain) IBOutlet UIWindow *window;
+@property (nonatomic, retain) IBOutlet UINavigationController *navigationController;
+
+- (void) activateWWAN;
+
+@end
+
diff --git a/samples/rtsp_player/Classes/iPlayerAppDelegate.m b/samples/rtsp_player/Classes/iPlayerAppDelegate.m
new file mode 100755
index 0000000..5c0aad5
--- /dev/null
+++ b/samples/rtsp_player/Classes/iPlayerAppDelegate.m
@@ -0,0 +1,53 @@
+//
+// iPlayerAppDelegate.m
+// iPlayer
+//
+// Created by Gu Update on 09-7-15.
+// Copyright __MyCompanyName__ 2009. All rights reserved.
+//
+
+#import "iPlayerAppDelegate.h"
+#import "RtspTestViewController.h"
+
+@implementation iPlayerAppDelegate
+
+@synthesize window;
+@synthesize navigationController;
+
+#pragma mark -
+#pragma mark Application lifecycle
+
+- (void)applicationDidFinishLaunching:(UIApplication *)application {
+
+ application.idleTimerDisabled=YES;
+ [application setStatusBarHidden:YES];
+
+ RtspTestViewController *rtsp = [[RtspTestViewController alloc] initWithNibName:@"RtspTestViewController" bundle:nil];
+
+ navigationController = [[UINavigationController alloc] initWithRootViewController:rtsp];
+ [window addSubview:[navigationController view]];
+ [window makeKeyAndVisible];
+
+
+}
+
+
+- (void)applicationWillTerminate:(UIApplication *)application {
+ // Save data if appropriate
+}
+
+//iphone4不退出
+- (void)applicationWillResignActive:(UIApplication *)application {
+ [[NSNotificationCenter defaultCenter] postNotificationName:@"MEApplicationDidEnterBackgroundNotification" object:nil];
+}
+
+#pragma mark -
+#pragma mark Memory management
+
+- (void)dealloc {
+ [navigationController release];
+ [window release];
+ [super dealloc];
+}
+@end
+
diff --git a/samples/rtsp_player/Classes/rtsp/BosenVect.h b/samples/rtsp_player/Classes/rtsp/BosenVect.h
new file mode 100755
index 0000000..da539cf
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/BosenVect.h
@@ -0,0 +1,103 @@
+
+#if !defined BOSENVect_H__
+#define BOSENVect_H__
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+// define Find function
+typedef int (*PFNCOMPARATOR) ( void * pFirst, void * pSecond );
+
+// define delete function. The default is "delete".
+typedef void (*PFNRELEASE) ( void * pObj );
+
+struct BosenNode
+{
+ void * m_pObj;
+ struct BosenNode * m_pNext;
+ struct BosenNode * m_pPrev;
+};
+
+#include "stdlib.h"
+class CBOSENVect{
+public:
+ CBOSENVect( bool bIsClear = false, PFNRELEASE pfnRelease = NULL );
+ ~CBOSENVect();
+
+ void Init( bool bIsClear = false, PFNRELEASE pfnRelease = NULL );
+
+ int GetSize( );
+ bool Append( void * pObj );
+ bool AppendFirst( void * pObj );
+ void RemoveAll( );
+ bool SetElementAt( int iIndex, void * pObj );
+ void * GetElementAt( int iIndex );
+ bool InsertElementAt( int iIndex, void * pObj );
+ void * RemoveElementAt( int iIndex );
+ int Find( void * pObj, void ** ppResult, PFNCOMPARATOR pfnComp = NULL );
+ bool SetElementAtEx( int iIndex, void * pObj, void ** ppOldObj );
+ int FindIndex( void * pObj, PFNCOMPARATOR pfnComp );
+
+ void Reverse( );
+
+private:
+
+ BosenNode * m_pHead;
+ BosenNode * m_pTail;
+ int m_iSize;
+ int m_iRef;
+ bool m_bIsClear;
+ PFNRELEASE m_pfnRelease;
+
+};
+
+
+class CBosenStack{
+public:
+ CBosenStack( bool bIsClear = false, PFNRELEASE pfnRelease = NULL )
+ :m_clVect( bIsClear, pfnRelease )
+ {
+ }
+
+
+ ~CBosenStack(){
+ }
+
+public:
+ void push( void * pObj );
+ void * pop( );
+
+ void * GetElementAt( int iIndex );
+
+ void RemoveElementAt( int iIndex )
+ {
+ m_clVect.RemoveElementAt( iIndex );
+ }
+
+ void RemoveAll();
+ bool empty()
+ {
+ if(m_clVect.GetSize()==0)
+ return true;
+ else
+ return false;
+ }
+ void * front()
+ {
+ return GetElementAt(0);
+ }
+ int GetSize()
+ {
+ return m_clVect.GetSize();
+ }
+
+private:
+
+ CBOSENVect m_clVect;
+
+};
+
+
+#endif
+
diff --git a/samples/rtsp_player/Classes/rtsp/FrameList.h b/samples/rtsp_player/Classes/rtsp/FrameList.h
new file mode 100755
index 0000000..e16c058
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/FrameList.h
@@ -0,0 +1,63 @@
+// FrameList.h: interface for the CFrameList class.
+//
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(AFX_FRAMELIST_H__D517C6C9_E430_4122_BE39_DC421AEB3028__INCLUDED_)
+#define AFX_FRAMELIST_H__D517C6C9_E430_4122_BE39_DC421AEB3028__INCLUDED_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "BosenVect.h"
+#define INITQUEUENUM 10//0
+
+typedef struct
+{
+ unsigned char* pContent;
+ unsigned long nFrameLength;
+ unsigned long nTimeStamp;
+ unsigned long nParam1; // reserved for future use
+ unsigned long nParam2; // reserved for future use
+} MediaData_FRAME;
+
+class CFrameList
+{
+public:
+ CFrameList();
+ virtual ~CFrameList();
+
+public:
+ MediaData_FRAME* GetFreeNote() ;
+ MediaData_FRAME* GetDataNote() ;
+ void AddToDataList(MediaData_FRAME* t) ;
+ void AddToFreeList(MediaData_FRAME* t) ;
+ void Reset() ;
+
+private:
+ MediaData_FRAME* m_tmpNote ;
+ CBosenStack m_datalist ;
+ CBosenStack m_freelist ;
+};
+#endif // !defined(AFX_FRAMELIST_H__D517C6C9_E430_4122_BE39_DC421AEB3028__INCLUDED_)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/samples/rtsp_player/Classes/rtsp/RtspParser.h b/samples/rtsp_player/Classes/rtsp/RtspParser.h
new file mode 100755
index 0000000..ce2dd3f
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/RtspParser.h
@@ -0,0 +1,63 @@
+// DhStreamParser.h: interface for the DhStreamParser class.
+//
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(AFX_DHSTREAMPARSER_H__4CB30E13_2FFF_4236_AEE3_E7BD20C8C173__INCLUDED_)
+#define AFX_DHSTREAMPARSER_H__4CB30E13_2FFF_4236_AEE3_E7BD20C8C173__INCLUDED_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "FrameList.h"
+#include "rtsp.h"
+
+class RtspParser
+{
+public:
+ RtspParser();
+ virtual ~RtspParser();
+ int play();
+ void stop();
+ int create(const char * url);//,const char *address,int port);
+ int create(const char * url ,int type);
+ int getstatu();
+ // ÊäÈëÊý¾Ý.
+ //int InputData(unsigned char *pData, unsigned long nDataLength);
+ //int InputDataEx(unsigned char *pData, unsigned long nDataLength);
+ // ͬ²½Êä³öÖ¡Êý¾Ý.
+ MediaData_FRAME * GetNextFrame();
+ // µ÷ÓÃÕâ¸öµÈͬÓÚÖØÕÒIÖ¡,»òÕßµÚÒ»´Îµ÷ÓõÄʱºò,ÕÒIÖ¡.
+ MediaData_FRAME * GetNextKeyFrame();
+ void PutPacketToQueueVodEx(char* pcContent, int iLen);
+ int Reset();
+
+ MediaData_FRAME * GetNextAudioFrame();
+ MediaData_FRAME* m_tmp_Frameinfo;//temporary frame info
+
+ CFrameList m_FrameInfoList ;//list of video frame info
+ CFrameList m_auFrameInfoList ;//list of audio frame info
+ MediaData_FRAME *m_FrameInfo;
+
+ int statu;
+ /*char * m_szDVRData; //ÈñÃ÷µÄDVR_Data
+ int m_nCurLenDVRData; //µ±Ç°DVR_DataµÄ´óС
+ char * m_szPacket; //ÈñÃ÷µÄDVR_Data
+ int m_nStartPos; //µ±Ç°Î»ÖÃ*/
+
+ source_struct * source;
+
+ int GetMediaInfo(TAVPMediaInfo * pInfo);
+
+private:
+ //ͨ¹ýURL»ñÈ¡·þÎñÆ÷µØÖ·£¬¶Ë¿ÚµÈÐÅÏ¢
+ //According to url get address, port
+ //iTypeĬÈÏΪ0, ²Î¿¼ SST_..
+ int parseUrl(const char * url, int iType);
+
+ char m_address[256];
+ int m_port;
+
+};
+
+#endif // !defined(AFX_DHSTREAMPARSER_H__4CB30E13_2FFF_4236_AEE3_E7BD20C8C173__INCLUDED_)
diff --git a/samples/rtsp_player/Classes/rtsp/StdAfx.h b/samples/rtsp_player/Classes/rtsp/StdAfx.h
new file mode 100755
index 0000000..e714c05
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/StdAfx.h
@@ -0,0 +1,43 @@
+// stdafx.h : include file for standard system include files,
+// or project specific include files that are used frequently, but
+// are changed infrequently
+//
+
+#if !defined(AFX_STDAFX_H__45D84E80_0D80_41DF_90A1_2FAE8918088B__INCLUDED_)
+#define AFX_STDAFX_H__45D84E80_0D80_41DF_90A1_2FAE8918088B__INCLUDED_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
+
+#include <stdio.h>
+
+// TODO: reference additional headers your program requires here
+// POSIX (iOS/macOS) vs. Winsock portability shim: maps the few Windows
+// socket names used by the rtsp code onto BSD sockets, and vice versa.
+#ifndef WIN32
+ #include <sys/time.h>
+ #include <sys/types.h>
+ #include <sys/socket.h>
+ #include <netinet/in.h>
+ #include <netdb.h>
+ #include <netinet/tcp.h>
+ #include <arpa/inet.h>
+ #include <sys/utsname.h>
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include<ctype.h>
+ typedef int SOCKET; // BSD sockets are plain file descriptors
+ #define SOCKET_ERROR -1
+ typedef void* LPVOID;
+#else
+ #include <winsock2.h>
+ #include <ws2tcpip.h>
+
+ typedef int socklen_t;
+ #pragma comment(lib,"ws2_32.lib")
+#endif
+//{{AFX_INSERT_LOCATION}}
+// Microsoft Visual C++ will insert additional declarations immediately before the previous line.
+
+#endif // !defined(AFX_STDAFX_H__45D84E80_0D80_41DF_90A1_2FAE8918088B__INCLUDED_)
diff --git a/samples/rtsp_player/Classes/rtsp/libRtspParser.a b/samples/rtsp_player/Classes/rtsp/libRtspParser.a
new file mode 100755
index 0000000..e764de6
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/libRtspParser.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/rtsp/libRtspParserSim.a b/samples/rtsp_player/Classes/rtsp/libRtspParserSim.a
new file mode 100755
index 0000000..c54c04f
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/libRtspParserSim.a
Binary files differ
diff --git a/samples/rtsp_player/Classes/rtsp/rtsp.h b/samples/rtsp_player/Classes/rtsp/rtsp.h
new file mode 100755
index 0000000..cdfa390
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/rtsp.h
@@ -0,0 +1,210 @@
+
+#if !defined( US_RTSP_H )
+#define US_RTSP_H
+
+#include "StdAfx.h"
+
+// Base64 alphabet and AMR-NB/AMR-WB packed frame-size tables used by the
+// RTP depacketizers.
+// NOTE(review): defining these arrays in a header gives every translation
+// unit its own copy (const => internal linkage in C++); consider extern.
+const char base[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
+const unsigned char amrnb_packed_size[16] = {12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0};
+const unsigned char amrwb_packed_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
+
+#define SOCKBUFSIZE 65536
+#define MAXRTPPAKCRTSIZE 1500
+#define MINRTPPACKETSIZE 12
+
+//Streaming Server Type
+#define SST_RTSP_GENERAL 0
+#define SST_RTSP_DARWIN 1
+#define SST_RTSP_HELIX 2
+#define SST_RTSP_DAHUA 3
+#define SST_RTSP_XUNLEI 4
+#define SST_RTSP_HISTREAM 5
+#define SST_RTSP_STARV 6
+
+// Parameters for source_create().
+typedef struct{
+ char url[256];
+ char ip[256];
+ int port;
+ int fulltime;
+ int maxtime;
+ void (*nl_notify)(int msgId, unsigned int wParam, unsigned int lParam); // event callback
+}source_param_struct;
+
+// One demuxed media frame handed to the consumer.
+typedef struct{
+ unsigned char* iData; //frame data; allocated and freed by the rtsp module
+ unsigned int iLen; //current packet length
+ unsigned int iPTS; //Frame timestamp
+ unsigned int iFrameIndex; //current frame sequence number
+
+ unsigned char iFrameType; //1->audio,2->video
+ unsigned char iIFrame; //video frames: whether this is a key frame; unused for audio
+ unsigned int iRealLen;
+}TSourceFrame;
+
+typedef struct{
+ //AVP video format
+ int wFPS; //frame rate, typically 3-25
+ int wBitDepth; //color depth, typically 8, 16, 24 or 32
+ int wWidth; //video width
+
+ int wHeight; //video height
+
+ int dwVideoCompress; //1->MPEG4,2->H264,3->H263
+ int dwVideoBitRate; //average video bitrate, bps
+ int wVideoReserved; //reserved; for VOD holds the program's total audio duration, usually equal to the video duration
+
+}TAVPVideoInfo;
+
+typedef struct {
+ // audio format
+ int wAudioFormat; //1->amr-nb,2->amr-wb,3->mp3,4->aac
+ int wChannelNum; //channel count, mono or stereo
+ int wBlockAlign; //reserve
+ int wBitsPerSample; //reserve
+ int wFrameTime; //reserve
+ int wAudioReserved; //holds the program's total audio duration, usually equal to the video duration
+
+ int dwSamplesPerSec; //reserve
+ int dwBitRate; //average bitrate
+}TAVPAudioInfo;
+
+// Singly-linked node holding one (possibly still-assembling) frame.
+// NOTE(review): `DataUnit* next_unit` relies on C++ tag lookup; a plain C
+// compiler would require `struct DataUnit *` here.
+typedef struct DataUnit {
+ TSourceFrame* frame;
+ int complete;
+ DataUnit* next_unit;
+}DataUnit;
+
+// H.264 RTP depacketizer state (SPS/PPS, timestamp/sequence tracking,
+// and the in-flight frame list).
+typedef struct{
+
+ char codec[32];
+ char sps[256];
+ int sps_len;
+ char pps[256];
+ int pps_len;
+
+ unsigned int timestamp;
+ unsigned int first_timestamp;
+ unsigned int timestamp_range;
+ unsigned int timestamp_step;
+
+ int seq;
+ int first_seq;
+ int last_seq;
+ int seq_lost; //1:seq lost, if H264 skip all the frame until IDR
+
+ int lastiPTS;
+ int fps;
+
+ DataUnit *listHead;
+ DataUnit *listEnd;
+
+ int frame_num;
+ int frame_cnt;
+
+}RTPDepacker_video;
+
+// Audio RTP depacketizer state.
+typedef struct{
+
+ DataUnit *listHead;
+ DataUnit *listEnd;
+ int lastiPTS;
+
+ int frame_num;
+ int frame_cnt;
+
+ unsigned int timestamp;
+ unsigned int first_timestamp;
+ unsigned int timestamp_step;
+ unsigned int timestamp_range;
+
+ int first_seq;
+ int last_seq;
+ int seq;
+
+}RTPDepacker_audio;
+
+typedef struct{
+ TAVPVideoInfo videoInfo;
+ TAVPAudioInfo audioInfo;
+}TAVPMediaInfo;
+
+
+// Per-session RTSP protocol state (SDP-derived track info, interleaved
+// RTP reassembly buffers, and the two depacketizers).
+typedef struct {
+ char *rtp_buf; //store the uncomplete rtp packet
+ int rtp_len; //index the full size of this rtp packet
+ int rtp_len_cur; //index the current size of the rtp packet
+
+ char rtpLen[4]; //store the 4 bytes of the Interleaved rtp packet header
+ int rtpLen_cur; //index the current size of the rtpLen
+
+ //content-base
+ char content_base[256]; //Content-Base
+ //data
+ char session_id[64]; //asigned by DarwinServer
+ int video_flag; //1:video 0:no video
+ char video_trackid[64]; //asigmed by DSS
+ char video_codec[32]; //"H264"
+ char video_info[256]; //the base64 coded sps and pps and other info
+
+ unsigned char channel;
+ unsigned char is_ready; //is ready for receive raw audio/video data. //xunlei need sps to be ready.
+ unsigned char reserve2;
+ unsigned char reserve3;
+ int video_samplerate;
+ int video_width;
+ int video_height;
+ int video_bitrate;
+ float video_range;
+
+ int audio_flag;
+ char audio_trackid[64];
+ char audio_codec[32];
+ int audio_amrnb;
+ char audio_info[256];
+ int audio_samplerate;
+ int audio_channels;
+ int audio_bitrate;
+ float audio_range;
+
+ unsigned int audio_ssrc;
+ unsigned int video_ssrc;
+
+ double range;
+ double range_start;
+ double range_end;
+
+ int server_type; //rtsp server
+
+ RTPDepacker_video * videoData;
+ RTPDepacker_audio * audioData;
+}RTSPContext;
+
+typedef struct {
+ SOCKET sock;
+ char *sock_buf; //store the data from socket
+ int sock_len; //index the size of the data
+}SocketContext;
+
+// One streaming session: connection parameters plus protocol/socket state.
+typedef struct{
+ //param
+ char url[256];
+ char ip[256];
+ int port;
+
+ RTSPContext *rtsp_context;
+ SocketContext *socket_context;
+ TAVPMediaInfo mediaInfo;
+
+}source_struct;
+
+// C API implemented by libRtspParser: session lifecycle and frame access.
+int GetStatus( source_struct *source );
+int GetRTP( source_struct *source );
+
+TSourceFrame *GetNextVideoFrame( source_struct *source );
+TSourceFrame *GetNextAudioPacket( source_struct *source );
+void FreeSourceFrame( TSourceFrame* frame );
+TAVPMediaInfo *GetMediaInfo( source_struct *source );
+void source_destroy( source_struct *source );
+source_struct *source_create( source_param_struct * param );
+
+#endif
diff --git a/samples/rtsp_player/Classes/rtsp/us_parser_raw264.h b/samples/rtsp_player/Classes/rtsp/us_parser_raw264.h
new file mode 100755
index 0000000..3488d56
--- /dev/null
+++ b/samples/rtsp_player/Classes/rtsp/us_parser_raw264.h
@@ -0,0 +1,165 @@
+// RtspParser.h: interface for the DhStreamParser class.
+// The parser is multi-threaded: one side pulls data from the network while
+// the other serves frames to the consumer, so access must be synchronized.
+//
+//////////////////////////////////////////////////////////////////////
+
+#if !defined(US_PARSER_RAW264__INCLUDED_)
+#define US_PARSER_RAW264__INCLUDED_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+
+#include "stdafx.h"
+//#include "us_kode.h"
+//#include "us_core_parser.h"
+//#include "us_parser.h"
+#include "FrameList.h"
+
+// Parser for raw H.264 elementary streams; mirrors RtspParser's interface.
+// Several IUSParser operations are stubbed out as no-ops below.
+class Raw264Parser //: public IUSParser
+{
+public:
+ Raw264Parser();
+ virtual ~Raw264Parser();
+ // Input data.
+ //int InputData(unsigned char *pData, unsigned long nDataLength);
+ //int InputDataEx(unsigned char *pData, unsigned long nDataLength);
+ void PutPacketToQueueVodEx(char* pcContent, int iLen);
+
+public:
+ //IUSParser
+
+ //Start Play
+ int Play();
+
+ //Stop
+ void Stop();
+
+ //Create source (stub: this overload does nothing and reports success)
+ int Prepare(const char * Tempip, int Tempport,const char * Tempusername,const char * Temppassword,int Tmepchannelid,int TempStreamType)
+ {
+ return 0;
+ }
+
+ int Prepare(const char * Tempuri, int Temptype);
+
+ //Get status
+ int GetStatus();
+ void SetStatus(int Playstatu)
+ {
+ return;
+ }
+
+ //Gettype
+ int gettype();
+
+ //finish
+ int Reset();
+
+ //PTZ control: command, data, channel (stub)
+ int DoPtzControl(int PtzCmd,int PtzData,int ChannelID)
+ {
+ return 0;
+ }
+
+ void SetCompanyIdentity(char * TempcompanyIdentity)
+ {
+ return;
+ }
+
+ //returns 0 on failure, 1 on success
+ //int SetDeviceChanelInfo(ChannelInfoSeting ChannelInfo)
+ //{
+ // return 0;
+ //}
+
+ //
+ // Synchronously fetch the next frame.
+ MediaData_FRAME * GetNextFrame();
+ MediaData_FRAME * GetNextAudioFrame();
+
+ // Equivalent to re-seeking an I-frame; on the first call, finds one.
+ MediaData_FRAME * GetNextKeyFrame();
+
+ //************ VOD-related ***************
+ //virtual int Play(unsigned char * VideoFilename) = 0;//VideoFilename: recording file to play
+ //recording struct to play
+// int Play(Video_File VideoFile)
+// {
+// return 0;
+// }
+
+ //SeekTime: the position to seek to
+// virtual int Seek(Date_Time SeekTime)
+// {
+// return 0;
+// }
+
+ //Pause
+ virtual int Pause()
+ {
+ return 0;
+ }
+
+ virtual int Resume()
+ {
+ return 0;
+ }
+
+ //Search recording files; channel starts at 0; FileType is the type:
+ //1: scheduled, 2: alarm (motion), 3: all, 4: manual, 5: sensor alarm.
+ //Sends the search request and waits for the complete result list before
+ //returning; returns the received data length, or -1 on failure.
+ //virtual int SearchVideoFile(Date_Time StartTime,Date_Time EndTime,int channel,int FileType)
+ //{
+ // return 0;
+ //};
+ //************ VOD-related ***************
+
+ //Get the next recording file
+ //Video_File * GetNextVideoFile()
+ //{
+ // return NULL;
+ //}
+
+ //Get the next alarm record
+ //Alarm_Struct * GetNextAlarm()
+ //{
+ // return NULL;
+ //}
+
+public:
+ int statu;
+ CFrameList m_FrameInfoList ;//video frame info list
+ MediaData_FRAME *m_FrameInfo;
+ CFrameList m_auFrameInfoList ;//audio frame info list
+
+ char m_url[512];
+ int m_Type;
+
+
+
+protected:
+ //////////////////////////////////////////////////////////////////////////
+ //
+
+ MediaData_FRAME* m_tmp_Frameinfo;//scratch frame info
+
+
+ /*char * m_szDVRData; //vendor DVR data
+ int m_nCurLenDVRData; //current DVR data size
+ char * m_szPacket; //vendor DVR data
+ int m_nStartPos; //current position*/
+private:
+ //Parse server address, port, etc. out of the URL.
+ //According to url get address, port
+ //iType defaults to 0; see the SST_.. constants in rtsp.h.
+ int parseUrl(const char * url, int iType);
+
+ char m_address[256];
+ int m_port;
+
+ //US_THREAD_T m_Thread;
+};
+
+#endif // !defined(AFX_DHSTREAMPARSER_H__4CB30E13_2FFF_4236_AEE3_E7BD20C8C173__INCLUDED_)
diff --git a/samples/rtsp_player/Classes/test/RtspTestViewController.h b/samples/rtsp_player/Classes/test/RtspTestViewController.h
new file mode 100755
index 0000000..35a34ed
--- /dev/null
+++ b/samples/rtsp_player/Classes/test/RtspTestViewController.h
@@ -0,0 +1,60 @@
+//
+// RtspTestViewController.h
+// iPlayer
+//
+// Created by HSKJ on 11-10-18.
+// Copyright 2011 _MyCompanyName. All rights reserved.
+//
+// View controller for the RTSP sample: takes an RTSP URL from a text field,
+// pulls frames on a background thread, decodes H.264 via FFmpeg
+// (VideoDecode.h) and shows the result in an image view.
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include "VideoDecode.h"
+//#include "yuv2rgbex.h"
+#ifdef __cplusplus
+}
+#endif
+
+#import <UIKit/UIKit.h>
+
+
+@class DisplayView;
+
+@interface RtspTestViewController : UIViewController<UITextFieldDelegate> {
+ IBOutlet DisplayView * displayview;
+ IBOutlet UITextField * urlRtsp;
+ IBOutlet UIView * containView;
+ IBOutlet UIImageView * imageview;
+ //L264Decode _l264D;
+ BOOL iHavedIFrame; // set once the first key frame has been seen
+ int old_len;
+ CGContextRef _context;
+ void *parser; // actually an RtspParser*; untyped so the header stays plain ObjC
+ void *_pDib; // decoder output buffer
+ void * _pDibRgb;
+ CVIDEO_DECODE * _pCodec; // FFmpeg decode context (VideoDecode.h)
+
+ AVPicture picture; // RGB24 conversion target
+ int videoStream;
+ struct SwsContext *img_convert_ctx; // YUV -> RGB scaler
+ AVFrame *pFrame;
+ UIImage *currentImage; // last decoded frame; written by the decode thread
+
+ NSTimer * _timer; // ~30 fps display timer
+ BOOL bExitThreadDisplay;
+
+ int m_Width; // stream dimensions from media info
+ int m_Height;
+}
+
+/* Last decoded picture as UIImage */
+@property (nonatomic, readonly) UIImage *currentImage;
+@property(nonatomic,retain) DisplayView *displayview;
+@property(nonatomic,retain) UITextField *urlRtsp;
+@property (nonatomic, retain) UIImageView *imageview;
+
+- (IBAction)onStart:(id)sender;
+- (IBAction)onSnap:(id)sender;
+
+-(void)setupScaler;
+
+@end
diff --git a/samples/rtsp_player/Classes/test/RtspTestViewController.mm b/samples/rtsp_player/Classes/test/RtspTestViewController.mm
new file mode 100755
index 0000000..77141e9
--- /dev/null
+++ b/samples/rtsp_player/Classes/test/RtspTestViewController.mm
@@ -0,0 +1,534 @@
+//
+// RtspTestViewController.m
+// iPlayer
+//
+// Created by HSKJ on 11-10-18.
+// Copyright 2011 _MyCompanyName. All rights reserved.
+//
+
+#import "RtspTestViewController.h"
+#import "DisplayView.h"
+#include "FrameList.h"
+#include "RtspParser.h"
+#include "StdAfx.h"
+#include "rtsp.h"
+#include "us_parser_raw264.h"
+#include "videoDecode.h"
+#import <QuartzCore/QuartzCore.h>
+#import <mach/mach_time.h>
+
+
+
+@implementation RtspTestViewController
+@synthesize displayview;
+@synthesize urlRtsp;
+@synthesize imageview;
+
+/*
+ // The designated initializer. Override if you create the controller programmatically and want to perform customization that is not appropriate for viewDidLoad.
+- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil {
+ if ((self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil])) {
+ // Custom initialization
+ }
+ return self;
+}
+*/
+-(void)setupScaler {
+ // (Re)create the RGB24 output picture and swscale context for the current
+ // stream size (m_Width x m_Height). Requires _pCodec to be initialized;
+ // called from getFrames once the media info is available.
+
+ // Release old picture and scaler
+ avpicture_free(&picture);
+ sws_freeContext(img_convert_ctx);
+
+ // Allocate RGB picture
+ avpicture_alloc(&picture, PIX_FMT_RGB24, m_Width, m_Height);
+ avcodec_init(); // NOTE(review): one-time global libavcodec init; belongs in app startup, harmless here
+ // Setup scaler
+ static int sws_flags = SWS_FAST_BILINEAR;
+ img_convert_ctx = sws_getContext(_pCodec->m_pAVCodecContext->width,
+ _pCodec->m_pAVCodecContext->height,
+ _pCodec->m_pAVCodecContext->pix_fmt,
+ m_Width,
+ m_Height,
+ PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+
+}
+
+
+// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
+- (void)viewDidLoad {
+ [super viewDidLoad];
+ //L264Decode_Init(&_l264D);
+ // No decoding happens until the first key frame has been seen.
+ iHavedIFrame = NO;
+ old_len = 0;
+ // Decode output buffer, sized for up to 720x576 at 4 bytes/pixel.
+ _pDib = malloc(4*720*576);
+ memset(_pDib, 0, 4*720*576);
+
+ //_pDibRgb = malloc(4*720*576);
+ //memset(_pDibRgb, 0, 4*720*576);
+ //_pDibRgb = NULL;
+
+ // Flip transform kept around for experiments; not applied below.
+ CATransform3D landscapeTransform = CATransform3DIdentity;
+ landscapeTransform = CATransform3DRotate(landscapeTransform, M_PI, 1, 0, 0);
+
+ //self.displayview.layer.transform = landscapeTransform;
+ urlRtsp.delegate = self;
+ //self.imageview.layer.transform = landscapeTransform;
+
+
+ //_Codec.InitDecode(352, 288, CODEC_ID_H264);
+ //InitRgbYuv();
+ //Yuv2Rgb = yuv2rgb_init(32, 0);
+ //InitConvertTable();
+
+ // Real dimensions arrive with the stream's media info (see getFrames).
+ m_Width = 0;
+ m_Height = 0;
+
+
+ bExitThreadDisplay = FALSE;
+}
+
+/*
+// Override to allow orientations other than the default portrait orientation.
+- (BOOL)shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
+ // Return YES for supported orientations
+ return (interfaceOrientation == UIInterfaceOrientationPortrait);
+}
+*/
+
+- (void)didReceiveMemoryWarning {
+ // Stock UIKit handler; this sample keeps no purgeable caches.
+ // Releases the view if it doesn't have a superview.
+ [super didReceiveMemoryWarning];
+
+ // Release any cached data, images, etc that aren't in use.
+}
+
+- (void)viewDidUnload {
+ [super viewDidUnload];
+ // Release any retained subviews of the main view.
+ // e.g. self.myOutlet = nil;
+ //L264Decode_End(&_l264D);
+
+ //UnInitDecode(_pCodec);
+ //[_timer invalidate];
+ // NOTE(review): getFrames' loop never reads this flag, and the display
+ // timer invalidation is commented out -- confirm teardown elsewhere.
+ bExitThreadDisplay = TRUE;
+}
+
+
+- (void)dealloc {
+ // NOTE(review): _pDib, _pCodec, parser, _timer and currentImage are never
+ // released/invalidated here; in this sample they live for the app's lifetime.
+ [super dealloc];
+}
+
+- (IBAction)onStart:(id)sender {
+ // Toggle playback. First tap: create the parser, open the RTSP session,
+ // start the frame-pump thread and the ~30 fps display timer. Next tap:
+ // stop the session and the timer.
+ // Fix: the original nested the stop path inside `if (!isPlay)`, so once
+ // playing the button could never stop playback; it also flipped isPlay
+ // before validating the URL.
+ static bool isPlay = NO;
+ if (!isPlay ) {
+ // Nothing to do without an RTSP URL.
+ if ([self.urlRtsp.text length] == 0) {
+ return;
+ }
+ NSLog(@"====================start");
+ isPlay = YES;
+ if (parser == NULL) {
+ RtspParser *parser1 = new RtspParser();
+ //Raw264Parser *parser1 = new Raw264Parser();
+ parser = parser1;
+
+ }
+
+ [self.urlRtsp resignFirstResponder];
+
+ ((RtspParser*)parser)->create([self.urlRtsp.text cStringUsingEncoding:NSUTF8StringEncoding], SST_RTSP_GENERAL);
+ self.urlRtsp.text = @"START";
+
+ // Background thread pulls and decodes; the timer displays the result.
+ ((RtspParser*)parser)->play();
+ [NSThread detachNewThreadSelector:@selector(getFrames) toTarget:self withObject:nil];
+
+ //START TIMER
+ _timer = [NSTimer scheduledTimerWithTimeInterval:1.0/30
+ target:self
+ selector:@selector(displayNextFrame)
+ userInfo:nil
+ repeats:YES];
+ }else {
+ // Was playing: stop the RTSP session and the display timer.
+ isPlay = NO;
+ ((RtspParser*)parser)->stop();
+ [_timer invalidate];
+ }
+}
+
+- (IBAction)onSnap:(id)sender {
+ // Ask the (optional) display view to persist its current frame.
+ if (displayview) {
+ [displayview saveImage];
+ }
+}
+
+
+
+
+- (void)getFrames {
+ // Background worker: waits for the stream's media info, sets up the H.264
+ // decoder and the YUV->RGB scaler, then loops pulling frames from the
+ // parser, decoding them and publishing currentImage for the display timer.
+ // NOTE(review): the pump loop below has no exit condition.
+ // Fixes vs. original: currentImage was stored autoreleased but explicitly
+ // released later (over-release); ts (uint64_t) was logged with %d; the
+ // CFData length used a hardcoded height of 288 instead of m_Height; and
+ // the READ_PACKET_H264 debug branch used an undefined frameData_len and
+ // leaked frameData on its early return.
+ NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+ uint64_t ts = 0;
+ TAVPMediaInfo mediaInfo;
+
+ // Phase 1: poll until the RTSP session reports width/height.
+ while(TRUE) {
+ if(((RtspParser*)parser)->GetMediaInfo(&mediaInfo))
+ {
+ m_Width = mediaInfo.videoInfo.wWidth;
+ m_Height = mediaInfo.videoInfo.wHeight;
+
+ _pCodec = (CVIDEO_DECODE*) malloc(sizeof(CVIDEO_DECODE));
+ memset(_pCodec, 0, sizeof(CVIDEO_DECODE));
+ InitDecode(_pCodec, m_Width, m_Height, CODEC_ID_H264, PIX_FMT_YUV420P); //PIX_FMT_ARGB, PIX_FMT_YUV420P,FMT_RGBA32
+
+ //InitColorSpace();
+ [self setupScaler];
+
+ break;
+
+ }
+ else
+ {
+
+ [NSThread sleepForTimeInterval:0.03];
+ /*
+ m_Width = 352;
+ m_Height = 288;
+
+ _pCodec = (CVIDEO_DECODE*) malloc(sizeof(CVIDEO_DECODE));
+ memset(_pCodec, 0, sizeof(CVIDEO_DECODE));
+ InitDecode(_pCodec, m_Width, m_Height, CODEC_ID_H264, PIX_FMT_YUV420P); //PIX_FMT_ARGB, PIX_FMT_YUV420P,FMT_RGBA32
+
+ //InitColorSpace();
+ [self setupScaler];
+
+ break;
+ */
+ }
+ }
+ // Phase 2: frame pump.
+ while (TRUE) {
+ int statu = ((RtspParser*)parser)->getstatu();
+ NSLog(@"rtsp statu %d",statu);
+
+ MediaData_FRAME * frame = ((RtspParser*)parser)->GetNextFrame();
+ if (frame == NULL) {
+ [NSThread sleepForTimeInterval:0.03];
+
+ NSLog(@"==============================================");
+ }else
+ {
+ NSLog(@"====================frame %d",frame->nFrameLength);
+
+ // %llu: ts is uint64_t (was logged with %d).
+ [self.urlRtsp performSelectorOnMainThread:@selector(setText:) withObject:[NSString stringWithFormat:@"len %6d, isIFrame: %@, ts:%4llu",frame->nFrameLength,frame->nParam1?@"TRUE":@"FALSE", (unsigned long long)ts] waitUntilDone:NO];
+
+ // Drop everything until the first key frame (nParam1 == 1).
+ if (frame->nParam1 == 1) {
+ iHavedIFrame = YES;
+ }
+ if (!iHavedIFrame) {
+ continue;
+ }
+//#define READ_PACKET_H264
+#ifdef READ_PACKET_H264
+ // Debug path: replay previously dumped .264 packets from Documents.
+ unsigned char picpDataNode[32768];
+ int len_264 = 0;
+ if(1)
+ {
+ int static fileNo = 0;
+ char filename[20];
+ sprintf(filename, "%04d.264", fileNo);
+ fileNo++;
+ //FILE * file;
+ //file = fopen(filename, "rb+");
+ NSString *fname = [NSString stringWithFormat:@"%s",filename];
+ NSString *docDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
+ NSString *fullPath = [docDir stringByAppendingPathComponent:fname];
+ FILE *file = fopen([fullPath cStringUsingEncoding:NSUTF8StringEncoding], "rb+");
+
+ printf("read file %s", filename);
+ if(file) {
+ len_264 = fread(picpDataNode, 1, 32768, file);
+ printf("++++++++++++++++++++read %d\n", len_264);
+ fclose(file);
+ }
+ }
+
+ // Decode: copy the packet into an 8-byte-aligned, zero-padded buffer.
+ int olen=0;
+ int r = 0;
+ int frameData_len = len_264; // was missing: AVDecode below uses frameData_len
+ int len_align = ((len_264 + 7) / 8 ) * 8;
+ char *frameData = (char*) malloc(len_align);
+
+ if(len_264)
+ {
+
+ memset(frameData, 0, len_align);
+ memcpy(frameData, picpDataNode, len_264);
+ }
+ else{
+ free(frameData); // was leaked on this early-out
+ return;
+ }
+
+#else
+ // Decode: copy the packet into an 8-byte-aligned, zero-padded buffer.
+ int olen=0;
+ int r = 0;
+ int frameData_len = frame->nFrameLength;
+ int len_align = ((frameData_len + 7) / 8 ) * 8;
+ char *frameData = (char*) malloc(len_align);
+ memset(frameData, 0, len_align);
+ memcpy(frameData, frame->pContent, frame->nFrameLength);
+#endif
+
+
+//#define SAVE_PACKET_H264
+#ifdef SAVE_PACKET_H264
+ // Debug path: dump each raw .264 packet into Documents.
+ int static fileNo = 0;
+ char filename[20];
+ sprintf(filename, "%04d.264", fileNo);
+ fileNo++;
+ //FILE * file;
+ //file = fopen(filename, "wb+");
+ NSString *fname = [NSString stringWithFormat:@"%s",filename];
+ NSString *docDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
+ NSString *fullPath = [docDir stringByAppendingPathComponent:fname];
+ FILE *file = fopen([fullPath cStringUsingEncoding:NSUTF8StringEncoding], "wb+");
+
+ NSLog(@"file:%@", fullPath);
+
+ printf("save file %s", filename);
+ if(file) {
+ int l = fwrite(frameData, 1, frame->nFrameLength, file);
+ printf("++++++++++++++++++++save %d\n", l);
+ fclose(file);
+ }
+#endif
+
+#define DECODE_TIME
+#if defined (DECODE_TIME)
+ /* Debug: per-stage wall-clock timing via mach_absolute_time. */
+ static mach_timebase_info_data_t time_info;
+ mach_timebase_info(&time_info);
+ static uint64_t lastTimer;
+ uint64_t now = mach_absolute_time();
+
+
+ //ts = ((now - lastTimer) * time_info.numer/ time_info.denom /1000000);
+ //NSLog(@"time stamp: %d", ts);
+ lastTimer = now;
+
+#endif
+ int keyFrame = 0;
+ NSLog(@"head:%x,%x,%x,%x,%x", (unsigned char) frameData[0], (unsigned char) frameData[1], (unsigned char) frameData[2], (unsigned char) frameData[3], (unsigned char) frameData[4]);
+ //r = L264Decode_DecodeFrame(&_l264D, (const unsigned char*)frameData, frame->nFrameLength, (unsigned char*)_pDib, &olen, FMT_RGBA32, 0);
+ r = AVDecode(_pCodec, (unsigned char*)frameData, frameData_len, (unsigned char**)&_pDib, &keyFrame);
+
+#if defined (DECODE_TIME)
+ now = mach_absolute_time();
+ ts = ((now - lastTimer) * time_info.numer/ time_info.denom /1000000);
+ NSLog(@"DECODE time stamp: %llu", (unsigned long long)ts);
+ lastTimer = now;
+#endif
+
+//#define DUMP_YUV
+#if defined(DUMP_YUV)
+ if(keyFrame)
+ {
+ int static fileNo = 0;
+ char filename[20];
+ sprintf(filename, "%04d.yuv", fileNo);
+ fileNo++;
+ //FILE * file;
+ //file = fopen(filename, "rb+");
+ NSString *fname = [NSString stringWithFormat:@"%s",filename];
+ NSString *docDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
+ NSString *fullPath = [docDir stringByAppendingPathComponent:fname];
+ FILE *file = fopen([fullPath cStringUsingEncoding:NSUTF8StringEncoding], "wb+");
+ NSLog(@"file:%@", fullPath);
+
+ if(file) {
+ int l = fwrite(_pDib, 1, 352*288*3/2, file);
+ printf("++++++++++++++++++++save %d\n", l);
+ fclose(file);
+ }
+
+ }
+#endif
+
+ free(frameData);
+
+ if(r == 0)
+ {
+ // Decode failed: resync by waiting for the next key frame.
+ NSLog(@"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx");
+ //L264Decode_End(&_l264D);
+ //L264Decode_Init(&_l264D);
+ ///UnInitDecode(_pCodec);
+ //InitDecode(_pCodec, 352, 288, CODEC_ID_H264, PIX_FMT_YUV420P);
+ iHavedIFrame = NO;
+ }
+ else
+ {
+ NSLog(@"oooooooooooooooooooooooooooooooooooooooooo");
+ //if (old_len != olen) {
+ {
+ old_len = olen;
+ //CGContextRef context = NULL;
+ //CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
+
+// Yuv12ToRgb32(352,288, (unsigned char *)_pDib, (unsigned char*) _pDibRgb);
+// Yuv2Rgb((unsigned char*) _pDibRgb, (unsigned char*)_pDib, (unsigned char*)_pDib + 352*288 ,
+// (unsigned char*)_pDib + 352*288*5/4,
+// 352,288,352, 288,352*288*3/2);
+// ConvertYUVtoRGB((unsigned char*)_pDib, (unsigned char*)_pDib + 352*288 ,
+// (unsigned char*)_pDib + 352*288*5/4,
+// (unsigned char*) _pDibRgb,
+// 352,288);
+// YUV420_RGB32((unsigned char*)_pDibRgb, (unsigned char*)_pDib, 352, 288);
+
+ // Convert the decoder's YUV planes to RGB24 in `picture`.
+ sws_scale (img_convert_ctx, _pCodec->m_pPicture->data, _pCodec->m_pPicture->linesize,
+ 0, _pCodec->m_pAVCodecContext->height,
+ picture.data, picture.linesize);
+
+#if defined (DECODE_TIME)
+ now = mach_absolute_time();
+ ts = ((now - lastTimer) * time_info.numer/ time_info.denom /1000000);
+ NSLog(@"YUV2RGB time stamp: %llu", (unsigned long long)ts);
+ lastTimer = now;
+#endif
+
+//#define DUMP_RGB
+#if defined(DUMP_RGB)
+ if(keyFrame)
+ {
+ int static fileNo = 0;
+ char filename[20];
+ sprintf(filename, "%04d.rgb", fileNo);
+ fileNo++;
+ //FILE * file;
+ //file = fopen(filename, "rb+");
+ NSString *fname = [NSString stringWithFormat:@"%s",filename];
+ NSString *docDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) lastObject];
+ NSString *fullPath = [docDir stringByAppendingPathComponent:fname];
+ FILE *file = fopen([fullPath cStringUsingEncoding:NSUTF8StringEncoding], "wb+");
+ NSLog(@"file:%@", fullPath);
+
+ if(file) {
+ int l = fwrite(_pDib, 1, 352*288*4, file);
+ printf("++++++++++++++++++++save %d\n", l);
+ fclose(file);
+ }
+
+ }
+#endif
+ //no out
+ //context = CGBitmapContextCreate(_pDibRgb, 352,288, 8, 4* 288, colorSpace, kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Little);
+
+ //blue color
+ //context = CGBitmapContextCreate(_pDibRgb, 352,288, 8, 4* 352, colorSpace, kCGImageAlphaNoneSkipLast| kCGBitmapByteOrder32Little);
+
+ //
+ //context = CGBitmapContextCreate(_pDib, 352,288, 8, 4* 352, colorSpace, kCGImageAlphaNoneSkipLast | kCGBitmapByteOrder32Little);
+ //CGColorSpaceRelease(colorSpace);
+ //CGContextRelease(_context);
+ //_context = context;
+
+
+
+ // Wrap the RGB24 pixels (no copy) in a CGImage, then a UIImage.
+ CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
+ // m_Height: was hardcoded to 288, truncating taller streams.
+ CFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, picture.data[0], picture.linesize[0] * m_Height, kCFAllocatorNull);
+ CGDataProviderRef provider = CGDataProviderCreateWithCFData(data);
+ CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
+ CGImageRef cgImage = CGImageCreate(m_Width,
+ m_Height,
+ 8,
+ 24,
+ picture.linesize[0],
+ colorSpace,
+ bitmapInfo,
+ provider,
+ NULL,
+ NO,
+ kCGRenderingIntentDefault);
+ //[self.displayview setCGImage:cgImage];
+ //[self.displayview performSelectorOnMainThread:@selector(setNeedsDisplay) withObject:nil waitUntilDone:YES];
+
+ CGColorSpaceRelease(colorSpace);
+ if(currentImage != nil)
+ {
+ [currentImage release];
+ currentImage = nil;
+ }
+ // retain: imageWithCGImage: returns an autoreleased object; the
+ // explicit release above would otherwise over-release it.
+ currentImage = [[UIImage imageWithCGImage:cgImage] retain];
+ CGImageRelease(cgImage);
+ CGDataProviderRelease(provider);
+ CFRelease(data);
+
+ //self.imageview.image = nil;
+
+ //self.imageview.image = image;
+ //[self.imageview setImage:[UIImage imageNamed:@"icon.PNG"]];
+ //[self.imageview performSelectorOnMainThread:@selector(setNeedsDisplay) withObject:nil waitUntilDone:NO];
+
+ //CGRect rect = CGRectMake(0.0f, 0.0f, 320.f,109.0f);
+ //UIImageView * myImage = [[UIImageView alloc] initWithFrame:rect];
+ //[myImage setImage:image];
+ //myImage.opaque = YES;
+ //[self.view addSubview:myImage];
+ //[myImage release];
+
+
+ }
+
+ //[self.displayview setCGImage:CGBitmapContextCreateImage(_context)];
+ //[self.displayview setCGImage:cgImage];
+ //[self.displayview performSelectorOnMainThread:@selector(setNeedsDisplay) withObject:nil waitUntilDone:YES];
+ }
+
+#if defined (DECODE_TIME)
+ now = mach_absolute_time();
+ ts = ((now - lastTimer) * time_info.numer/ time_info.denom /1000000);
+ NSLog(@"DISPLAY time stamp: %llu", (unsigned long long)ts);
+ lastTimer = now;
+#endif
+ }
+
+ //[NSThread sleepForTimeInterval:0.03];
+
+ }
+ [pool release];
+}
+
+-(void)displayNextFrame {
+ // Timer callback (~30 fps, main thread): show the most recently decoded
+ // frame, if any.
+ //[timer invalidate];
+ //[self.imageview setImage:[UIImage imageNamed:@"icon.PNG"]];
+ //NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+
+ //while (! bExitThreadDisplay)
+ {
+
+ if(currentImage != nil)
+ {
+ // NOTE(review): currentImage is replaced/released on the decode thread
+ // without any locking; a race with this read could crash -- confirm.
+ imageview.image = currentImage;
+ NSLog(@"image%f %f\n", currentImage.size.height, currentImage.size.width);
+ }
+
+ //[NSThread sleepForTimeInterval:0.03];
+ }
+
+ //[pool release];
+
+}
+
+
+- (void)textFieldDidBeginEditing:(UITextField *)textField {
+ // Slide the input container to the top so the keyboard doesn't cover it.
+ containView.frame = CGRectMake(0, 0, 200, 133);
+}
+
+- (void)textFieldDidEndEditing:(UITextField *)textField {
+ // Restore the input container to its resting position.
+ containView.frame = CGRectMake(0, 283, 200, 133);
+ [textField resignFirstResponder];
+}
+
+- (BOOL)textFieldShouldReturn:(UITextField *)textField {
+ // Dismiss the keyboard when Return is tapped.
+ [textField resignFirstResponder];
+ return YES;
+}
+
+@end
diff --git a/samples/rtsp_player/Classes/test/RtspTestViewController.xib b/samples/rtsp_player/Classes/test/RtspTestViewController.xib
new file mode 100755
index 0000000..7a258bd
--- /dev/null
+++ b/samples/rtsp_player/Classes/test/RtspTestViewController.xib
@@ -0,0 +1,486 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<archive type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="7.10">
+ <data>
+ <int key="IBDocument.SystemTarget">1280</int>
+ <string key="IBDocument.SystemVersion">10K549</string>
+ <string key="IBDocument.InterfaceBuilderVersion">1938</string>
+ <string key="IBDocument.AppKitVersion">1038.36</string>
+ <string key="IBDocument.HIToolboxVersion">461.00</string>
+ <object class="NSMutableDictionary" key="IBDocument.PluginVersions">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string key="NS.object.0">933</string>
+ </object>
+ <object class="NSArray" key="IBDocument.IntegratedClassDependencies">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>IBUIButton</string>
+ <string>IBUIImageView</string>
+ <string>IBUIView</string>
+ <string>IBUITextField</string>
+ <string>IBProxyObject</string>
+ </object>
+ <object class="NSArray" key="IBDocument.PluginDependencies">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.Metadata">
+ <string key="NS.key.0">PluginDependencyRecalculationVersion</string>
+ <integer value="1" key="NS.object.0"/>
+ </object>
+ <object class="NSMutableArray" key="IBDocument.RootObjects" id="1000">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBProxyObject" id="372490531">
+ <string key="IBProxiedObjectIdentifier">IBFilesOwner</string>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBProxyObject" id="975951072">
+ <string key="IBProxiedObjectIdentifier">IBFirstResponder</string>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUIView" id="191373211">
+ <reference key="NSNextResponder"/>
+ <int key="NSvFlags">274</int>
+ <object class="NSMutableArray" key="NSSubviews">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBUIView" id="483066627">
+ <reference key="NSNextResponder" ref="191373211"/>
+ <int key="NSvFlags">292</int>
+ <object class="NSMutableArray" key="NSSubviews">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBUIButton" id="865430252">
+ <reference key="NSNextResponder" ref="483066627"/>
+ <int key="NSvFlags">292</int>
+ <string key="NSFrame">{{10, 70}, {144, 37}}</string>
+ <reference key="NSSuperview" ref="483066627"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="901862212"/>
+ <bool key="IBUIOpaque">NO</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <int key="IBUIContentHorizontalAlignment">0</int>
+ <int key="IBUIContentVerticalAlignment">0</int>
+ <int key="IBUIButtonType">1</int>
+ <string key="IBUINormalTitle">连接</string>
+ <object class="NSColor" key="IBUIHighlightedTitleColor" id="932920919">
+ <int key="NSColorSpace">3</int>
+ <bytes key="NSWhite">MQA</bytes>
+ </object>
+ <object class="NSColor" key="IBUINormalTitleColor">
+ <int key="NSColorSpace">1</int>
+ <bytes key="NSRGB">MC4xOTYwNzg0MzQ2IDAuMzA5ODAzOTMyOSAwLjUyMTU2ODY1NgA</bytes>
+ </object>
+ <object class="NSColor" key="IBUINormalTitleShadowColor" id="893094018">
+ <int key="NSColorSpace">3</int>
+ <bytes key="NSWhite">MC41AA</bytes>
+ </object>
+ <object class="IBUIFontDescription" key="IBUIFontDescription" id="519130917">
+ <string key="name">Helvetica-Bold</string>
+ <string key="family">Helvetica</string>
+ <int key="traits">2</int>
+ <double key="pointSize">15</double>
+ </object>
+ <object class="NSFont" key="IBUIFont" id="894530040">
+ <string key="NSName">Helvetica-Bold</string>
+ <double key="NSSize">15</double>
+ <int key="NSfFlags">16</int>
+ </object>
+ </object>
+ <object class="IBUIButton" id="901862212">
+ <reference key="NSNextResponder" ref="483066627"/>
+ <int key="NSvFlags">292</int>
+ <string key="NSFrame">{{177, 70}, {123, 37}}</string>
+ <reference key="NSSuperview" ref="483066627"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="1067268430"/>
+ <bool key="IBUIOpaque">NO</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <int key="IBUIContentHorizontalAlignment">0</int>
+ <int key="IBUIContentVerticalAlignment">0</int>
+ <int key="IBUIButtonType">1</int>
+ <string key="IBUINormalTitle">拍照</string>
+ <reference key="IBUIHighlightedTitleColor" ref="932920919"/>
+ <object class="NSColor" key="IBUINormalTitleColor">
+ <int key="NSColorSpace">1</int>
+ <bytes key="NSRGB">MC4xOTYwNzg0MzQ2IDAuMzA5ODAzOTMyOSAwLjUyMTU2ODY1NgA</bytes>
+ </object>
+ <reference key="IBUINormalTitleShadowColor" ref="893094018"/>
+ <reference key="IBUIFontDescription" ref="519130917"/>
+ <reference key="IBUIFont" ref="894530040"/>
+ </object>
+ <object class="IBUITextField" id="402769443">
+ <reference key="NSNextResponder" ref="483066627"/>
+ <int key="NSvFlags">292</int>
+ <string key="NSFrame">{{10, 20}, {886, 31}}</string>
+ <reference key="NSSuperview" ref="483066627"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="865430252"/>
+ <bool key="IBUIOpaque">NO</bool>
+ <bool key="IBUIClipsSubviews">YES</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <int key="IBUIContentVerticalAlignment">0</int>
+                            <string key="IBUIText">rtsp://211.147.238.73:554/mobile/1/3E10554128FF6B12/vMcWYJmOzLyiMEPx.sdp?id=13800000001&amp;t=1325220998&amp;en=453b452757c14c25d3d2ef376838e408</string>
+ <int key="IBUIBorderStyle">3</int>
+ <object class="NSColor" key="IBUITextColor">
+ <int key="NSColorSpace">3</int>
+ <bytes key="NSWhite">MAA</bytes>
+ <object class="NSColorSpace" key="NSCustomColorSpace" id="472343359">
+ <int key="NSID">2</int>
+ </object>
+ </object>
+ <bool key="IBUIAdjustsFontSizeToFit">YES</bool>
+ <float key="IBUIMinimumFontSize">17</float>
+ <object class="IBUITextInputTraits" key="IBUITextInputTraits">
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUIFontDescription" key="IBUIFontDescription">
+ <string key="name">Helvetica</string>
+ <string key="family">Helvetica</string>
+ <int key="traits">0</int>
+ <double key="pointSize">12</double>
+ </object>
+ <object class="NSFont" key="IBUIFont">
+ <string key="NSName">Helvetica</string>
+ <double key="NSSize">12</double>
+ <int key="NSfFlags">16</int>
+ </object>
+ </object>
+ </object>
+ <string key="NSFrame">{{0, 296}, {320, 120}}</string>
+ <reference key="NSSuperview" ref="191373211"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="402769443"/>
+ <object class="NSColor" key="IBUIBackgroundColor">
+ <int key="NSColorSpace">3</int>
+ <bytes key="NSWhite">MQA</bytes>
+ <reference key="NSCustomColorSpace" ref="472343359"/>
+ </object>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUIImageView" id="662956442">
+ <reference key="NSNextResponder" ref="191373211"/>
+ <int key="NSvFlags">274</int>
+ <string key="NSFrame">{{2, 2}, {315, 286}}</string>
+ <reference key="NSSuperview" ref="191373211"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="483066627"/>
+ <object class="NSColor" key="IBUIBackgroundColor" id="886310780">
+ <int key="NSColorSpace">1</int>
+ <bytes key="NSRGB">MCAwIDAAA</bytes>
+ </object>
+ <bool key="IBUIOpaque">NO</bool>
+ <bool key="IBUIAutoresizesSubviews">NO</bool>
+ <bool key="IBUIClearsContextBeforeDrawing">NO</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUIView" id="1067268430">
+ <reference key="NSNextResponder" ref="191373211"/>
+ <int key="NSvFlags">-2147483356</int>
+ <string key="NSFrame">{{99, 242}, {122, 62}}</string>
+ <reference key="NSSuperview" ref="191373211"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView"/>
+ <reference key="IBUIBackgroundColor" ref="886310780"/>
+ <bool key="IBUIClearsContextBeforeDrawing">NO</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ </object>
+ <string key="NSFrame">{{0, 64}, {320, 416}}</string>
+ <reference key="NSSuperview"/>
+ <reference key="NSWindow"/>
+ <reference key="NSNextKeyView" ref="662956442"/>
+ <object class="NSColor" key="IBUIBackgroundColor">
+ <int key="NSColorSpace">3</int>
+ <bytes key="NSWhite">MQA</bytes>
+ <reference key="NSCustomColorSpace" ref="472343359"/>
+ </object>
+ <object class="IBUISimulatedStatusBarMetrics" key="IBUISimulatedStatusBarMetrics"/>
+ <object class="IBUISimulatedNavigationBarMetrics" key="IBUISimulatedTopBarMetrics">
+ <bool key="IBUIPrompted">NO</bool>
+ </object>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ </object>
+ <object class="IBObjectContainer" key="IBDocument.Objects">
+ <object class="NSMutableArray" key="connectionRecords">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">view</string>
+ <reference key="source" ref="372490531"/>
+ <reference key="destination" ref="191373211"/>
+ </object>
+ <int key="connectionID">3</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">urlRtsp</string>
+ <reference key="source" ref="372490531"/>
+ <reference key="destination" ref="402769443"/>
+ </object>
+ <int key="connectionID">7</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">containView</string>
+ <reference key="source" ref="372490531"/>
+ <reference key="destination" ref="483066627"/>
+ </object>
+ <int key="connectionID">12</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">imageview</string>
+ <reference key="source" ref="372490531"/>
+ <reference key="destination" ref="662956442"/>
+ </object>
+ <int key="connectionID">16</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchEventConnection" key="connection">
+ <string key="label">onStart:</string>
+ <reference key="source" ref="865430252"/>
+ <reference key="destination" ref="372490531"/>
+ <int key="IBEventType">7</int>
+ </object>
+ <int key="connectionID">9</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchEventConnection" key="connection">
+ <string key="label">onSnap:</string>
+ <reference key="source" ref="901862212"/>
+ <reference key="destination" ref="372490531"/>
+ <int key="IBEventType">7</int>
+ </object>
+ <int key="connectionID">14</int>
+ </object>
+ </object>
+ <object class="IBMutableOrderedSet" key="objectRecords">
+ <object class="NSArray" key="orderedObjects">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBObjectRecord">
+ <int key="objectID">0</int>
+ <object class="NSArray" key="object" id="0">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ <reference key="children" ref="1000"/>
+ <nil key="parent"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">1</int>
+ <reference key="object" ref="191373211"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="662956442"/>
+ <reference ref="483066627"/>
+ <reference ref="1067268430"/>
+ </object>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-1</int>
+ <reference key="object" ref="372490531"/>
+ <reference key="parent" ref="0"/>
+ <string key="objectName">File's Owner</string>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-2</int>
+ <reference key="object" ref="975951072"/>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">11</int>
+ <reference key="object" ref="483066627"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="865430252"/>
+ <reference ref="901862212"/>
+ <reference ref="402769443"/>
+ </object>
+ <reference key="parent" ref="191373211"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">13</int>
+ <reference key="object" ref="901862212"/>
+ <reference key="parent" ref="483066627"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">5</int>
+ <reference key="object" ref="865430252"/>
+ <reference key="parent" ref="483066627"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">15</int>
+ <reference key="object" ref="662956442"/>
+ <reference key="parent" ref="191373211"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">4</int>
+ <reference key="object" ref="402769443"/>
+ <reference key="parent" ref="483066627"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">6</int>
+ <reference key="object" ref="1067268430"/>
+ <reference key="parent" ref="191373211"/>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="flattenedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>-1.CustomClassName</string>
+ <string>-1.IBPluginDependency</string>
+ <string>-2.CustomClassName</string>
+ <string>-2.IBPluginDependency</string>
+ <string>1.IBPluginDependency</string>
+ <string>11.IBPluginDependency</string>
+ <string>13.IBPluginDependency</string>
+ <string>15.IBPluginDependency</string>
+ <string>4.IBPluginDependency</string>
+ <string>5.IBPluginDependency</string>
+ <string>6.CustomClassName</string>
+ <string>6.IBPluginDependency</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>RtspTestViewController</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>UIResponder</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>DisplayView</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="unlocalizedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <reference key="dict.values" ref="0"/>
+ </object>
+ <nil key="activeLocalization"/>
+ <object class="NSMutableDictionary" key="localizations">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <reference key="dict.values" ref="0"/>
+ </object>
+ <nil key="sourceID"/>
+ <int key="maxID">16</int>
+ </object>
+ <object class="IBClassDescriber" key="IBDocument.Classes">
+ <object class="NSMutableArray" key="referencedPartialClassDescriptions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBPartialClassDescription">
+ <string key="className">DisplayView</string>
+ <string key="superclassName">UIView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBProjectSource</string>
+ <string key="minorKey">./Classes/DisplayView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">RtspTestViewController</string>
+ <string key="superclassName">UIViewController</string>
+ <object class="NSMutableDictionary" key="actions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>onSnap:</string>
+ <string>onStart:</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>id</string>
+ <string>id</string>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="actionInfosByName">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>onSnap:</string>
+ <string>onStart:</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBActionInfo">
+ <string key="name">onSnap:</string>
+ <string key="candidateClassName">id</string>
+ </object>
+ <object class="IBActionInfo">
+ <string key="name">onStart:</string>
+ <string key="candidateClassName">id</string>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="outlets">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>containView</string>
+ <string>displayview</string>
+ <string>imageview</string>
+ <string>urlRtsp</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>UIView</string>
+ <string>DisplayView</string>
+ <string>UIImageView</string>
+ <string>UITextField</string>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="toOneOutletInfosByName">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>containView</string>
+ <string>displayview</string>
+ <string>imageview</string>
+ <string>urlRtsp</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBToOneOutletInfo">
+ <string key="name">containView</string>
+ <string key="candidateClassName">UIView</string>
+ </object>
+ <object class="IBToOneOutletInfo">
+ <string key="name">displayview</string>
+ <string key="candidateClassName">DisplayView</string>
+ </object>
+ <object class="IBToOneOutletInfo">
+ <string key="name">imageview</string>
+ <string key="candidateClassName">UIImageView</string>
+ </object>
+ <object class="IBToOneOutletInfo">
+ <string key="name">urlRtsp</string>
+ <string key="candidateClassName">UITextField</string>
+ </object>
+ </object>
+ </object>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBProjectSource</string>
+ <string key="minorKey">./Classes/RtspTestViewController.h</string>
+ </object>
+ </object>
+ </object>
+ </object>
+ <int key="IBDocument.localizationMode">0</int>
+ <string key="IBDocument.TargetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDependencyDefaults">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaTouchPlugin.iPhoneOS</string>
+ <real value="1280" key="NS.object.0"/>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDevelopmentDependencies">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaTouchPlugin.InterfaceBuilder3</string>
+ <integer value="3000" key="NS.object.0"/>
+ </object>
+ <bool key="IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion">YES</bool>
+ <int key="IBDocument.defaultPropertyAccessControl">3</int>
+ <string key="IBCocoaTouchPluginVersion">933</string>
+ </data>
+</archive>
diff --git a/samples/rtsp_player/Default.png b/samples/rtsp_player/Default.png
new file mode 100755
index 0000000..ca918a2
--- /dev/null
+++ b/samples/rtsp_player/Default.png
Binary files differ
diff --git a/samples/rtsp_player/Demo-Info.plist b/samples/rtsp_player/Demo-Info.plist
new file mode 100755
index 0000000..aa153aa
--- /dev/null
+++ b/samples/rtsp_player/Demo-Info.plist
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleDisplayName</key>
+ <string>RTSPDemo</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string>icon.PNG</string>
+ <key>CFBundleIdentifier</key>
+ <string>net.moveeye.rtspdemo</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.2.1</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>NSMainNibFile</key>
+ <string>MainWindow</string>
+ <key>UIApplicationExitsOnSuspend</key>
+ <true/>
+ <key>UIRequiresPersistentWiFi</key>
+ <true/>
+ <key>UIStatusBarHidden</key>
+ <true/>
+</dict>
+</plist>
diff --git a/samples/rtsp_player/Entitlements.plist b/samples/rtsp_player/Entitlements.plist
new file mode 100755
index 0000000..42cfb9c
--- /dev/null
+++ b/samples/rtsp_player/Entitlements.plist
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>get-task-allow</key>
+ <true/>
+</dict>
+</plist>
diff --git a/samples/rtsp_player/MainWindow.xib b/samples/rtsp_player/MainWindow.xib
new file mode 100755
index 0000000..7ba67d5
--- /dev/null
+++ b/samples/rtsp_player/MainWindow.xib
@@ -0,0 +1,537 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<archive type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="7.10">
+ <data>
+ <int key="IBDocument.SystemTarget">768</int>
+ <string key="IBDocument.SystemVersion">10J567</string>
+ <string key="IBDocument.InterfaceBuilderVersion">788</string>
+ <string key="IBDocument.AppKitVersion">1038.35</string>
+ <string key="IBDocument.HIToolboxVersion">462.00</string>
+ <object class="NSMutableDictionary" key="IBDocument.PluginVersions">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string key="NS.object.0">117</string>
+ </object>
+ <object class="NSMutableArray" key="IBDocument.EditedObjectIDs">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <integer value="2"/>
+ <integer value="13"/>
+ </object>
+ <object class="NSArray" key="IBDocument.PluginDependencies">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.Metadata">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys" id="0">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <object class="NSMutableArray" key="IBDocument.RootObjects" id="1000">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBProxyObject" id="841351856">
+ <string key="IBProxiedObjectIdentifier">IBFilesOwner</string>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBProxyObject" id="302016328">
+ <string key="IBProxiedObjectIdentifier">IBFirstResponder</string>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUICustomObject" id="664661524">
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUIWindow" id="380026005">
+ <reference key="NSNextResponder"/>
+ <int key="NSvFlags">1316</int>
+ <object class="NSPSMatrix" key="NSFrameMatrix"/>
+ <string key="NSFrameSize">{320, 480}</string>
+ <reference key="NSSuperview"/>
+ <object class="NSColor" key="IBUIBackgroundColor">
+ <int key="NSColorSpace">1</int>
+ <bytes key="NSRGB">MCAwIDAAA</bytes>
+ </object>
+ <bool key="IBUIOpaque">NO</bool>
+ <bool key="IBUIClearsContextBeforeDrawing">NO</bool>
+ <object class="IBUISimulatedStatusBarMetrics" key="IBUISimulatedStatusBarMetrics"/>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <object class="IBUINavigationController" id="701001926">
+ <object class="IBUISimulatedStatusBarMetrics" key="IBUISimulatedStatusBarMetrics"/>
+ <object class="IBUISimulatedOrientationMetrics" key="IBUISimulatedOrientationMetrics">
+ <int key="interfaceOrientation">1</int>
+ </object>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <bool key="IBUIHorizontal">NO</bool>
+ <object class="IBUINavigationBar" key="IBUINavigationBar" id="207850653">
+ <nil key="NSNextResponder"/>
+ <int key="NSvFlags">256</int>
+ <string key="NSFrameSize">{0, 0}</string>
+ <bool key="IBUIOpaque">NO</bool>
+ <bool key="IBUIClipsSubviews">YES</bool>
+ <bool key="IBUIMultipleTouchEnabled">YES</bool>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <int key="IBUIBarStyle">1</int>
+ </object>
+ <object class="NSMutableArray" key="IBUIViewControllers">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBUIViewController" id="619226028">
+ <object class="IBUINavigationItem" key="IBUINavigationItem" id="394667715">
+ <reference key="IBUINavigationBar"/>
+ <string key="IBUITitle"/>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ </object>
+ <reference key="IBUIParentViewController" ref="701001926"/>
+ <string key="IBUINibName">MainViewController</string>
+ <object class="IBUISimulatedStatusBarMetrics" key="IBUISimulatedStatusBarMetrics"/>
+ <object class="IBUISimulatedOrientationMetrics" key="IBUISimulatedOrientationMetrics">
+ <int key="interfaceOrientation">1</int>
+ </object>
+ <string key="targetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <bool key="IBUIHorizontal">NO</bool>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="IBObjectContainer" key="IBDocument.Objects">
+ <object class="NSMutableArray" key="connectionRecords">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">delegate</string>
+ <reference key="source" ref="841351856"/>
+ <reference key="destination" ref="664661524"/>
+ </object>
+ <int key="connectionID">4</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">window</string>
+ <reference key="source" ref="664661524"/>
+ <reference key="destination" ref="380026005"/>
+ </object>
+ <int key="connectionID">5</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBCocoaTouchOutletConnection" key="connection">
+ <string key="label">navigationController</string>
+ <reference key="source" ref="664661524"/>
+ <reference key="destination" ref="701001926"/>
+ </object>
+ <int key="connectionID">15</int>
+ </object>
+ </object>
+ <object class="IBMutableOrderedSet" key="objectRecords">
+ <object class="NSArray" key="orderedObjects">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBObjectRecord">
+ <int key="objectID">0</int>
+ <reference key="object" ref="0"/>
+ <reference key="children" ref="1000"/>
+ <nil key="parent"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">2</int>
+ <reference key="object" ref="380026005"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-1</int>
+ <reference key="object" ref="841351856"/>
+ <reference key="parent" ref="0"/>
+ <string key="objectName">File's Owner</string>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">3</int>
+ <reference key="object" ref="664661524"/>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-2</int>
+ <reference key="object" ref="302016328"/>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">9</int>
+ <reference key="object" ref="701001926"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="207850653"/>
+ <reference ref="619226028"/>
+ </object>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">11</int>
+ <reference key="object" ref="207850653"/>
+ <reference key="parent" ref="701001926"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">13</int>
+ <reference key="object" ref="619226028"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="394667715"/>
+ </object>
+ <reference key="parent" ref="701001926"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">14</int>
+ <reference key="object" ref="394667715"/>
+ <reference key="parent" ref="619226028"/>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="flattenedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>-1.CustomClassName</string>
+ <string>-2.CustomClassName</string>
+ <string>11.IBPluginDependency</string>
+ <string>13.IBPluginDependency</string>
+ <string>2.IBAttributePlaceholdersKey</string>
+ <string>2.IBEditorWindowLastContentRect</string>
+ <string>2.IBPluginDependency</string>
+ <string>3.CustomClassName</string>
+ <string>3.IBPluginDependency</string>
+ <string>9.IBEditorWindowLastContentRect</string>
+ <string>9.IBPluginDependency</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>UIApplication</string>
+ <string>UIResponder</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <object class="NSMutableDictionary">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <string>{{673, 276}, {320, 480}}</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>iPlayerAppDelegate</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ <string>{{306, 276}, {320, 480}}</string>
+ <string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="unlocalizedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <nil key="activeLocalization"/>
+ <object class="NSMutableDictionary" key="localizations">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <nil key="sourceID"/>
+ <int key="maxID">16</int>
+ </object>
+ <object class="IBClassDescriber" key="IBDocument.Classes">
+ <object class="NSMutableArray" key="referencedPartialClassDescriptions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBPartialClassDescription">
+ <string key="className">iPlayerAppDelegate</string>
+ <string key="superclassName">NSObject</string>
+ <object class="NSMutableDictionary" key="outlets">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>navigationController</string>
+ <string>window</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>UINavigationController</string>
+ <string>UIWindow</string>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="toOneOutletInfosByName">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>navigationController</string>
+ <string>window</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBToOneOutletInfo">
+ <string key="name">navigationController</string>
+ <string key="candidateClassName">UINavigationController</string>
+ </object>
+ <object class="IBToOneOutletInfo">
+ <string key="name">window</string>
+ <string key="candidateClassName">UIWindow</string>
+ </object>
+ </object>
+ </object>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBProjectSource</string>
+ <string key="minorKey">Classes/iPlayerAppDelegate.h</string>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableArray" key="referencedPartialClassDescriptionsV3.2+">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSError.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSFileManager.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyValueCoding.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyValueObserving.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyedArchiver.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSObject.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSRunLoop.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSThread.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSURL.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSURLConnection.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">QuartzCore.framework/Headers/CAAnimation.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">QuartzCore.framework/Headers/CALayer.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIAccessibility.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UINibLoading.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="758491255">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIResponder.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIApplication</string>
+ <string key="superclassName">UIResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIApplication.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIBarButtonItem</string>
+ <string key="superclassName">UIBarItem</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIBarButtonItem.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIBarItem</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIBarItem.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UINavigationBar</string>
+ <string key="superclassName">UIView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="1072346877">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UINavigationBar.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UINavigationController</string>
+ <string key="superclassName">UIViewController</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="772807924">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UINavigationController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UINavigationItem</string>
+ <string key="superclassName">NSObject</string>
+ <reference key="sourceIdentifier" ref="1072346877"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIResponder</string>
+ <string key="superclassName">NSObject</string>
+ <reference key="sourceIdentifier" ref="758491255"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UISearchBar</string>
+ <string key="superclassName">UIView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UISearchBar.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UISearchDisplayController</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UISearchDisplayController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UITextField.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIView</string>
+ <string key="superclassName">UIResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIViewController</string>
+ <reference key="sourceIdentifier" ref="772807924"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIViewController</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIPopoverController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIViewController</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UISplitViewController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIViewController</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UITabBarController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIViewController</string>
+ <string key="superclassName">UIResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIViewController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">UIWindow</string>
+ <string key="superclassName">UIView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">UIKit.framework/Headers/UIWindow.h</string>
+ </object>
+ </object>
+ </object>
+ </object>
+ <int key="IBDocument.localizationMode">0</int>
+ <string key="IBDocument.TargetRuntimeIdentifier">IBCocoaTouchFramework</string>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDependencies">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaTouchPlugin.iPhoneOS</string>
+ <integer value="768" key="NS.object.0"/>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDependencyDefaults">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaTouchPlugin.iPhoneOS</string>
+ <integer value="1024" key="NS.object.0"/>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDevelopmentDependencies">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaTouchPlugin.InterfaceBuilder3</string>
+ <integer value="3000" key="NS.object.0"/>
+ </object>
+ <bool key="IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion">YES</bool>
+ <string key="IBDocument.LastKnownRelativeProjectPath">iPlayer.xcodeproj</string>
+ <int key="IBDocument.defaultPropertyAccessControl">3</int>
+ <string key="IBCocoaTouchPluginVersion">117</string>
+ </data>
+</archive>
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/4xm.c b/samples/rtsp_player/ffmpeg/libavcodec/4xm.c
new file mode 100755
index 0000000..97436ce
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/4xm.c
@@ -0,0 +1,880 @@
+/*
+ * 4XM codec
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * 4XM codec.
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+#include "dsputil.h"
+#include "get_bits.h"
+#include "bytestream.h"
+
+//#undef NDEBUG
+//#include <assert.h>
+
+#define BLOCK_TYPE_VLC_BITS 5
+#define ACDC_VLC_BITS 9
+
+#define CFRAME_BUFFER_COUNT 100
+
+static const uint8_t block_type_tab[2][4][8][2]={
+ {
+ { //{8,4,2}x{8,4,2}
+ { 0,1}, { 2,2}, { 6,3}, {14,4}, {30,5}, {31,5}, { 0,0}
+ },{ //{8,4}x1
+ { 0,1}, { 0,0}, { 2,2}, { 6,3}, {14,4}, {15,4}, { 0,0}
+ },{ //1x{8,4}
+ { 0,1}, { 2,2}, { 0,0}, { 6,3}, {14,4}, {15,4}, { 0,0}
+ },{ //1x2, 2x1
+ { 0,1}, { 0,0}, { 0,0}, { 2,2}, { 6,3}, {14,4}, {15,4}
+ }
+ },{
+ { //{8,4,2}x{8,4,2}
+ { 1,2}, { 4,3}, { 5,3}, {0,2}, {6,3}, {7,3}, {0,0}
+ },{//{8,4}x1
+ { 1,2}, { 0,0}, { 2,2}, {0,2}, {6,3}, {7,3}, {0,0}
+ },{//1x{8,4}
+ { 1,2}, { 2,2}, { 0,0}, {0,2}, {6,3}, {7,3}, {0,0}
+ },{//1x2, 2x1
+ { 1,2}, { 0,0}, { 0,0}, {0,2}, {2,2}, {6,3}, {7,3}
+ }
+ }
+};
+
+static const uint8_t size2index[4][4]={
+ {-1, 3, 1, 1},
+ { 3, 0, 0, 0},
+ { 2, 0, 0, 0},
+ { 2, 0, 0, 0},
+};
+
+static const int8_t mv[256][2]={
+{ 0, 0},{ 0, -1},{ -1, 0},{ 1, 0},{ 0, 1},{ -1, -1},{ 1, -1},{ -1, 1},
+{ 1, 1},{ 0, -2},{ -2, 0},{ 2, 0},{ 0, 2},{ -1, -2},{ 1, -2},{ -2, -1},
+{ 2, -1},{ -2, 1},{ 2, 1},{ -1, 2},{ 1, 2},{ -2, -2},{ 2, -2},{ -2, 2},
+{ 2, 2},{ 0, -3},{ -3, 0},{ 3, 0},{ 0, 3},{ -1, -3},{ 1, -3},{ -3, -1},
+{ 3, -1},{ -3, 1},{ 3, 1},{ -1, 3},{ 1, 3},{ -2, -3},{ 2, -3},{ -3, -2},
+{ 3, -2},{ -3, 2},{ 3, 2},{ -2, 3},{ 2, 3},{ 0, -4},{ -4, 0},{ 4, 0},
+{ 0, 4},{ -1, -4},{ 1, -4},{ -4, -1},{ 4, -1},{ 4, 1},{ -1, 4},{ 1, 4},
+{ -3, -3},{ -3, 3},{ 3, 3},{ -2, -4},{ -4, -2},{ 4, -2},{ -4, 2},{ -2, 4},
+{ 2, 4},{ -3, -4},{ 3, -4},{ 4, -3},{ -5, 0},{ -4, 3},{ -3, 4},{ 3, 4},
+{ -1, -5},{ -5, -1},{ -5, 1},{ -1, 5},{ -2, -5},{ 2, -5},{ 5, -2},{ 5, 2},
+{ -4, -4},{ -4, 4},{ -3, -5},{ -5, -3},{ -5, 3},{ 3, 5},{ -6, 0},{ 0, 6},
+{ -6, -1},{ -6, 1},{ 1, 6},{ 2, -6},{ -6, 2},{ 2, 6},{ -5, -4},{ 5, 4},
+{ 4, 5},{ -6, -3},{ 6, 3},{ -7, 0},{ -1, -7},{ 5, -5},{ -7, 1},{ -1, 7},
+{ 4, -6},{ 6, 4},{ -2, -7},{ -7, 2},{ -3, -7},{ 7, -3},{ 3, 7},{ 6, -5},
+{ 0, -8},{ -1, -8},{ -7, -4},{ -8, 1},{ 4, 7},{ 2, -8},{ -2, 8},{ 6, 6},
+{ -8, 3},{ 5, -7},{ -5, 7},{ 8, -4},{ 0, -9},{ -9, -1},{ 1, 9},{ 7, -6},
+{ -7, 6},{ -5, -8},{ -5, 8},{ -9, 3},{ 9, -4},{ 7, -7},{ 8, -6},{ 6, 8},
+{ 10, 1},{-10, 2},{ 9, -5},{ 10, -3},{ -8, -7},{-10, -4},{ 6, -9},{-11, 0},
+{ 11, 1},{-11, -2},{ -2, 11},{ 7, -9},{ -7, 9},{ 10, 6},{ -4, 11},{ 8, -9},
+{ 8, 9},{ 5, 11},{ 7,-10},{ 12, -3},{ 11, 6},{ -9, -9},{ 8, 10},{ 5, 12},
+{-11, 7},{ 13, 2},{ 6,-12},{ 10, 9},{-11, 8},{ -7, 12},{ 0, 14},{ 14, -2},
+{ -9, 11},{ -6, 13},{-14, -4},{ -5,-14},{ 5, 14},{-15, -1},{-14, -6},{ 3,-15},
+{ 11,-11},{ -7, 14},{ -5, 15},{ 8,-14},{ 15, 6},{ 3, 16},{ 7,-15},{-16, 5},
+{ 0, 17},{-16, -6},{-10, 14},{-16, 7},{ 12, 13},{-16, 8},{-17, 6},{-18, 3},
+{ -7, 17},{ 15, 11},{ 16, 10},{ 2,-19},{ 3,-19},{-11,-16},{-18, 8},{-19, -6},
+{ 2,-20},{-17,-11},{-10,-18},{ 8, 19},{-21, -1},{-20, 7},{ -4, 21},{ 21, 5},
+{ 15, 16},{ 2,-22},{-10,-20},{-22, 5},{ 20,-11},{ -7,-22},{-12, 20},{ 23, -5},
+{ 13,-20},{ 24, -2},{-15, 19},{-11, 22},{ 16, 19},{ 23,-10},{-18,-18},{ -9,-24},
+{ 24,-10},{ -3, 26},{-23, 13},{-18,-20},{ 17, 21},{ -4, 27},{ 27, 6},{ 1,-28},
+{-11, 26},{-17,-23},{ 7, 28},{ 11,-27},{ 29, 5},{-23,-19},{-28,-11},{-21, 22},
+{-30, 7},{-17, 26},{-27, 16},{ 13, 29},{ 19,-26},{ 10,-31},{-14,-30},{ 20,-27},
+{-29, 18},{-16,-31},{-28,-22},{ 21,-30},{-25, 28},{ 26,-29},{ 25,-32},{-32,-32}
+};
+
+// this is simply the scaled down elementwise product of the standard jpeg quantizer table and the AAN premul table
+static const uint8_t dequant_table[64]={
+ 16, 15, 13, 19, 24, 31, 28, 17,
+ 17, 23, 25, 31, 36, 63, 45, 21,
+ 18, 24, 27, 37, 52, 59, 49, 20,
+ 16, 28, 34, 40, 60, 80, 51, 20,
+ 18, 31, 48, 66, 68, 86, 56, 21,
+ 19, 38, 56, 59, 64, 64, 48, 20,
+ 27, 48, 55, 55, 56, 51, 35, 15,
+ 20, 35, 34, 32, 31, 22, 15, 8,
+};
+
+static VLC block_type_vlc[2][4];
+
+
+typedef struct CFrameBuffer{
+ unsigned int allocated_size;
+ unsigned int size;
+ int id;
+ uint8_t *data;
+}CFrameBuffer;
+
+typedef struct FourXContext{
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ AVFrame current_picture, last_picture;
+ GetBitContext pre_gb; ///< ac/dc prefix
+ GetBitContext gb;
+ const uint8_t *bytestream;
+ const uint16_t *wordstream;
+ int mv[256];
+ VLC pre_vlc;
+ int last_dc;
+ DECLARE_ALIGNED(16, DCTELEM, block)[6][64];
+ void *bitstream_buffer;
+ unsigned int bitstream_buffer_size;
+ int version;
+ CFrameBuffer cfrm[CFRAME_BUFFER_COUNT];
+} FourXContext;
+
+
+#define FIX_1_082392200 70936
+#define FIX_1_414213562 92682
+#define FIX_1_847759065 121095
+#define FIX_2_613125930 171254
+
+#define MULTIPLY(var,const) (((var)*(const)) >> 16)
+
+static void idct(DCTELEM block[64]){
+ int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ int tmp10, tmp11, tmp12, tmp13;
+ int z5, z10, z11, z12, z13;
+ int i;
+ int temp[64];
+
+ for(i=0; i<8; i++){
+ tmp10 = block[8*0 + i] + block[8*4 + i];
+ tmp11 = block[8*0 + i] - block[8*4 + i];
+
+ tmp13 = block[8*2 + i] + block[8*6 + i];
+ tmp12 = MULTIPLY(block[8*2 + i] - block[8*6 + i], FIX_1_414213562) - tmp13;
+
+ tmp0 = tmp10 + tmp13;
+ tmp3 = tmp10 - tmp13;
+ tmp1 = tmp11 + tmp12;
+ tmp2 = tmp11 - tmp12;
+
+ z13 = block[8*5 + i] + block[8*3 + i];
+ z10 = block[8*5 + i] - block[8*3 + i];
+ z11 = block[8*1 + i] + block[8*7 + i];
+ z12 = block[8*1 + i] - block[8*7 + i];
+
+ tmp7 = z11 + z13;
+ tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562);
+
+ z5 = MULTIPLY(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5;
+
+ tmp6 = tmp12 - tmp7;
+ tmp5 = tmp11 - tmp6;
+ tmp4 = tmp10 + tmp5;
+
+ temp[8*0 + i] = tmp0 + tmp7;
+ temp[8*7 + i] = tmp0 - tmp7;
+ temp[8*1 + i] = tmp1 + tmp6;
+ temp[8*6 + i] = tmp1 - tmp6;
+ temp[8*2 + i] = tmp2 + tmp5;
+ temp[8*5 + i] = tmp2 - tmp5;
+ temp[8*4 + i] = tmp3 + tmp4;
+ temp[8*3 + i] = tmp3 - tmp4;
+ }
+
+ for(i=0; i<8*8; i+=8){
+ tmp10 = temp[0 + i] + temp[4 + i];
+ tmp11 = temp[0 + i] - temp[4 + i];
+
+ tmp13 = temp[2 + i] + temp[6 + i];
+ tmp12 = MULTIPLY(temp[2 + i] - temp[6 + i], FIX_1_414213562) - tmp13;
+
+ tmp0 = tmp10 + tmp13;
+ tmp3 = tmp10 - tmp13;
+ tmp1 = tmp11 + tmp12;
+ tmp2 = tmp11 - tmp12;
+
+ z13 = temp[5 + i] + temp[3 + i];
+ z10 = temp[5 + i] - temp[3 + i];
+ z11 = temp[1 + i] + temp[7 + i];
+ z12 = temp[1 + i] - temp[7 + i];
+
+ tmp7 = z11 + z13;
+ tmp11 = MULTIPLY(z11 - z13, FIX_1_414213562);
+
+ z5 = MULTIPLY(z10 + z12, FIX_1_847759065);
+ tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5;
+ tmp12 = MULTIPLY(z10, - FIX_2_613125930) + z5;
+
+ tmp6 = tmp12 - tmp7;
+ tmp5 = tmp11 - tmp6;
+ tmp4 = tmp10 + tmp5;
+
+ block[0 + i] = (tmp0 + tmp7)>>6;
+ block[7 + i] = (tmp0 - tmp7)>>6;
+ block[1 + i] = (tmp1 + tmp6)>>6;
+ block[6 + i] = (tmp1 - tmp6)>>6;
+ block[2 + i] = (tmp2 + tmp5)>>6;
+ block[5 + i] = (tmp2 - tmp5)>>6;
+ block[4 + i] = (tmp3 + tmp4)>>6;
+ block[3 + i] = (tmp3 - tmp4)>>6;
+ }
+}
+
+static av_cold void init_vlcs(FourXContext *f){
+ static VLC_TYPE table[8][32][2];
+ int i;
+
+ for(i=0; i<8; i++){
+ block_type_vlc[0][i].table= table[i];
+ block_type_vlc[0][i].table_allocated= 32;
+ init_vlc(&block_type_vlc[0][i], BLOCK_TYPE_VLC_BITS, 7,
+ &block_type_tab[0][i][0][1], 2, 1,
+ &block_type_tab[0][i][0][0], 2, 1, INIT_VLC_USE_NEW_STATIC);
+ }
+}
+
+static void init_mv(FourXContext *f){
+ int i;
+
+ for(i=0; i<256; i++){
+ if(f->version>1)
+ f->mv[i] = mv[i][0] + mv[i][1] *f->current_picture.linesize[0]/2;
+ else
+ f->mv[i] = (i&15) - 8 + ((i>>4)-8)*f->current_picture.linesize[0]/2;
+ }
+}
+
+#if HAVE_BIGENDIAN
+#define LE_CENTRIC_MUL(dst, src, scale, dc) \
+ { \
+ unsigned tmpval = AV_RN32(src); \
+ tmpval = (tmpval << 16) | (tmpval >> 16); \
+ tmpval = tmpval * (scale) + (dc); \
+ tmpval = (tmpval << 16) | (tmpval >> 16); \
+ AV_WN32A(dst, tmpval); \
+ }
+#else
+#define LE_CENTRIC_MUL(dst, src, scale, dc) \
+ { \
+ unsigned tmpval = AV_RN32(src) * (scale) + (dc); \
+ AV_WN32A(dst, tmpval); \
+ }
+#endif
+
+static inline void mcdc(uint16_t *dst, uint16_t *src, int log2w, int h, int stride, int scale, int dc){
+ int i;
+ dc*= 0x10001;
+
+ switch(log2w){
+ case 0:
+ for(i=0; i<h; i++){
+ dst[0] = scale*src[0] + dc;
+ if(scale) src += stride;
+ dst += stride;
+ }
+ break;
+ case 1:
+ for(i=0; i<h; i++){
+ LE_CENTRIC_MUL(dst, src, scale, dc);
+ if(scale) src += stride;
+ dst += stride;
+ }
+ break;
+ case 2:
+ for(i=0; i<h; i++){
+ LE_CENTRIC_MUL(dst, src, scale, dc);
+ LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc);
+ if(scale) src += stride;
+ dst += stride;
+ }
+ break;
+ case 3:
+ for(i=0; i<h; i++){
+ LE_CENTRIC_MUL(dst, src, scale, dc);
+ LE_CENTRIC_MUL(dst + 2, src + 2, scale, dc);
+ LE_CENTRIC_MUL(dst + 4, src + 4, scale, dc);
+ LE_CENTRIC_MUL(dst + 6, src + 6, scale, dc);
+ if(scale) src += stride;
+ dst += stride;
+ }
+ break;
+ default: assert(0);
+ }
+}
+
+static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride){
+ const int index= size2index[log2h][log2w];
+ const int h= 1<<log2h;
+ int code= get_vlc2(&f->gb, block_type_vlc[1-(f->version>1)][index].table, BLOCK_TYPE_VLC_BITS, 1);
+ uint16_t *start= (uint16_t*)f->last_picture.data[0];
+ uint16_t *end= start + stride*(f->avctx->height-h+1) - (1<<log2w);
+
+ assert(code>=0 && code<=6);
+
+ if(code == 0){
+ src += f->mv[ *f->bytestream++ ];
+ if(start > src || src > end){
+ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
+ return;
+ }
+ mcdc(dst, src, log2w, h, stride, 1, 0);
+ }else if(code == 1){
+ log2h--;
+ decode_p_block(f, dst , src , log2w, log2h, stride);
+ decode_p_block(f, dst + (stride<<log2h), src + (stride<<log2h), log2w, log2h, stride);
+ }else if(code == 2){
+ log2w--;
+ decode_p_block(f, dst , src , log2w, log2h, stride);
+ decode_p_block(f, dst + (1<<log2w), src + (1<<log2w), log2w, log2h, stride);
+ }else if(code == 3 && f->version<2){
+ mcdc(dst, src, log2w, h, stride, 1, 0);
+ }else if(code == 4){
+ src += f->mv[ *f->bytestream++ ];
+ if(start > src || src > end){
+ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n");
+ return;
+ }
+ mcdc(dst, src, log2w, h, stride, 1, av_le2ne16(*f->wordstream++));
+ }else if(code == 5){
+ mcdc(dst, src, log2w, h, stride, 0, av_le2ne16(*f->wordstream++));
+ }else if(code == 6){
+ if(log2w){
+ dst[0] = av_le2ne16(*f->wordstream++);
+ dst[1] = av_le2ne16(*f->wordstream++);
+ }else{
+ dst[0 ] = av_le2ne16(*f->wordstream++);
+ dst[stride] = av_le2ne16(*f->wordstream++);
+ }
+ }
+}
+
+static int decode_p_frame(FourXContext *f, const uint8_t *buf, int length){
+ int x, y;
+ const int width= f->avctx->width;
+ const int height= f->avctx->height;
+ uint16_t *src= (uint16_t*)f->last_picture.data[0];
+ uint16_t *dst= (uint16_t*)f->current_picture.data[0];
+ const int stride= f->current_picture.linesize[0]>>1;
+ unsigned int bitstream_size, bytestream_size, wordstream_size, extra;
+
+ if(f->version>1){
+ extra=20;
+ bitstream_size= AV_RL32(buf+8);
+ wordstream_size= AV_RL32(buf+12);
+ bytestream_size= AV_RL32(buf+16);
+ }else{
+ extra=0;
+ bitstream_size = AV_RL16(buf-4);
+ wordstream_size= AV_RL16(buf-2);
+ bytestream_size= FFMAX(length - bitstream_size - wordstream_size, 0);
+ }
+
+ if(bitstream_size+ bytestream_size+ wordstream_size + extra != length
+ || bitstream_size > (1<<26)
+ || bytestream_size > (1<<26)
+ || wordstream_size > (1<<26)
+ ){
+ av_log(f->avctx, AV_LOG_ERROR, "lengths %d %d %d %d\n", bitstream_size, bytestream_size, wordstream_size,
+ bitstream_size+ bytestream_size+ wordstream_size - length);
+ return -1;
+ }
+
+ av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, bitstream_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!f->bitstream_buffer)
+ return AVERROR(ENOMEM);
+ f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)(buf + extra), bitstream_size/4);
+ init_get_bits(&f->gb, f->bitstream_buffer, 8*bitstream_size);
+
+ f->wordstream= (const uint16_t*)(buf + extra + bitstream_size);
+ f->bytestream= buf + extra + bitstream_size + wordstream_size;
+
+ init_mv(f);
+
+ for(y=0; y<height; y+=8){
+ for(x=0; x<width; x+=8){
+ decode_p_block(f, dst + x, src + x, 3, 3, stride);
+ }
+ src += 8*stride;
+ dst += 8*stride;
+ }
+
+ if( bitstream_size != (get_bits_count(&f->gb)+31)/32*4
+ || (((const char*)f->wordstream - (const char*)buf + 2)&~2) != extra + bitstream_size + wordstream_size
+ || (((const char*)f->bytestream - (const char*)buf + 3)&~3) != extra + bitstream_size + wordstream_size + bytestream_size)
+ av_log(f->avctx, AV_LOG_ERROR, " %d %td %td bytes left\n",
+ bitstream_size - (get_bits_count(&f->gb)+31)/32*4,
+ -(((const char*)f->bytestream - (const char*)buf + 3)&~3) + (extra + bitstream_size + wordstream_size + bytestream_size),
+ -(((const char*)f->wordstream - (const char*)buf + 2)&~2) + (extra + bitstream_size + wordstream_size)
+ );
+
+ return 0;
+}
+
+/**
+ * decode block and dequantize.
+ * Note this is almost identical to MJPEG.
+ */
+static int decode_i_block(FourXContext *f, DCTELEM *block){
+ int code, i, j, level, val;
+
+ /* DC coef */
+ val = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3);
+ if (val>>4){
+ av_log(f->avctx, AV_LOG_ERROR, "error dc run != 0\n");
+ }
+
+ if(val)
+ val = get_xbits(&f->gb, val);
+
+ val = val * dequant_table[0] + f->last_dc;
+ f->last_dc =
+ block[0] = val;
+ /* AC coefs */
+ i = 1;
+ for(;;) {
+ code = get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3);
+
+ /* EOB */
+ if (code == 0)
+ break;
+ if (code == 0xf0) {
+ i += 16;
+ } else {
+ level = get_xbits(&f->gb, code & 0xf);
+ i += code >> 4;
+ if (i >= 64) {
+ av_log(f->avctx, AV_LOG_ERROR, "run %d oveflow\n", i);
+ return 0;
+ }
+
+ j= ff_zigzag_direct[i];
+ block[j] = level * dequant_table[j];
+ i++;
+ if (i >= 64)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static inline void idct_put(FourXContext *f, int x, int y){
+ DCTELEM (*block)[64]= f->block;
+ int stride= f->current_picture.linesize[0]>>1;
+ int i;
+ uint16_t *dst = ((uint16_t*)f->current_picture.data[0]) + y * stride + x;
+
+ for(i=0; i<4; i++){
+ block[i][0] += 0x80*8*8;
+ idct(block[i]);
+ }
+
+ if(!(f->avctx->flags&CODEC_FLAG_GRAY)){
+ for(i=4; i<6; i++) idct(block[i]);
+ }
+
+/* Note transform is:
+y= ( 1b + 4g + 2r)/14
+cb=( 3b - 2g - 1r)/14
+cr=(-1b - 4g + 5r)/14
+*/
+ for(y=0; y<8; y++){
+ for(x=0; x<8; x++){
+ DCTELEM *temp= block[(x>>2) + 2*(y>>2)] + 2*(x&3) + 2*8*(y&3); //FIXME optimize
+ int cb= block[4][x + 8*y];
+ int cr= block[5][x + 8*y];
+ int cg= (cb + cr)>>1;
+ int y;
+
+ cb+=cb;
+
+ y = temp[0];
+ dst[0 ]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8);
+ y = temp[1];
+ dst[1 ]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8);
+ y = temp[8];
+ dst[ stride]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8);
+ y = temp[9];
+ dst[1+stride]= ((y+cb)>>3) + (((y-cg)&0xFC)<<3) + (((y+cr)&0xF8)<<8);
+ dst += 2;
+ }
+ dst += 2*stride - 2*8;
+ }
+}
+
+static int decode_i_mb(FourXContext *f){
+ int i;
+
+ f->dsp.clear_blocks(f->block[0]);
+
+ for(i=0; i<6; i++){
+ if(decode_i_block(f, f->block[i]) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static const uint8_t *read_huffman_tables(FourXContext *f, const uint8_t * const buf){
+ int frequency[512];
+ uint8_t flag[512];
+ int up[512];
+ uint8_t len_tab[257];
+ int bits_tab[257];
+ int start, end;
+ const uint8_t *ptr= buf;
+ int j;
+
+ memset(frequency, 0, sizeof(frequency));
+ memset(up, -1, sizeof(up));
+
+ start= *ptr++;
+ end= *ptr++;
+ for(;;){
+ int i;
+
+ for(i=start; i<=end; i++){
+ frequency[i]= *ptr++;
+ }
+ start= *ptr++;
+ if(start==0) break;
+
+ end= *ptr++;
+ }
+ frequency[256]=1;
+
+ while((ptr - buf)&3) ptr++; // 4byte align
+
+ for(j=257; j<512; j++){
+ int min_freq[2]= {256*256, 256*256};
+ int smallest[2]= {0, 0};
+ int i;
+ for(i=0; i<j; i++){
+ if(frequency[i] == 0) continue;
+ if(frequency[i] < min_freq[1]){
+ if(frequency[i] < min_freq[0]){
+ min_freq[1]= min_freq[0]; smallest[1]= smallest[0];
+ min_freq[0]= frequency[i];smallest[0]= i;
+ }else{
+ min_freq[1]= frequency[i];smallest[1]= i;
+ }
+ }
+ }
+ if(min_freq[1] == 256*256) break;
+
+ frequency[j]= min_freq[0] + min_freq[1];
+ flag[ smallest[0] ]= 0;
+ flag[ smallest[1] ]= 1;
+ up[ smallest[0] ]=
+ up[ smallest[1] ]= j;
+ frequency[ smallest[0] ]= frequency[ smallest[1] ]= 0;
+ }
+
+ for(j=0; j<257; j++){
+ int node;
+ int len=0;
+ int bits=0;
+
+ for(node= j; up[node] != -1; node= up[node]){
+ bits += flag[node]<<len;
+ len++;
+ if(len > 31) av_log(f->avctx, AV_LOG_ERROR, "vlc length overflow\n"); //can this happen at all ?
+ }
+
+ bits_tab[j]= bits;
+ len_tab[j]= len;
+ }
+
+ init_vlc(&f->pre_vlc, ACDC_VLC_BITS, 257,
+ len_tab , 1, 1,
+ bits_tab, 4, 4, 0);
+
+ return ptr;
+}
+
+static int mix(int c0, int c1){
+ int blue = 2*(c0&0x001F) + (c1&0x001F);
+ int green= (2*(c0&0x03E0) + (c1&0x03E0))>>5;
+ int red = 2*(c0>>10) + (c1>>10);
+ return red/3*1024 + green/3*32 + blue/3;
+}
+
+static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length){
+ int x, y, x2, y2;
+ const int width= f->avctx->width;
+ const int height= f->avctx->height;
+ uint16_t *dst= (uint16_t*)f->current_picture.data[0];
+ const int stride= f->current_picture.linesize[0]>>1;
+
+ for(y=0; y<height; y+=16){
+ for(x=0; x<width; x+=16){
+ unsigned int color[4], bits;
+ memset(color, 0, sizeof(color));
+//warning following is purely guessed ...
+ color[0]= bytestream_get_le16(&buf);
+ color[1]= bytestream_get_le16(&buf);
+
+ if(color[0]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 1\n");
+ if(color[1]&0x8000) av_log(NULL, AV_LOG_ERROR, "unk bit 2\n");
+
+ color[2]= mix(color[0], color[1]);
+ color[3]= mix(color[1], color[0]);
+
+ bits= bytestream_get_le32(&buf);
+ for(y2=0; y2<16; y2++){
+ for(x2=0; x2<16; x2++){
+ int index= 2*(x2>>2) + 8*(y2>>2);
+ dst[y2*stride+x2]= color[(bits>>index)&3];
+ }
+ }
+ dst+=16;
+ }
+ dst += 16*stride - width;
+ }
+
+ return 0;
+}
+
/**
 * Decode an "ifrm" intra frame.
 *
 * Chunk layout: [LE32 bitstream_size][bitstream][LE32 prestream_size/4]
 * [LE32 token_count][prestream]. The prestream begins with the Huffman
 * tables, followed by big-endian 32-bit words of coefficient tokens.
 *
 * @return 0 on success, negative on malformed sizes or allocation failure.
 */
static int decode_i_frame(FourXContext *f, const uint8_t *buf, int length){
    int x, y;
    const int width= f->avctx->width;
    const int height= f->avctx->height;
    /* NOTE(review): dst is only advanced, never dereferenced — pixels are
     * written by idct_put(); this variable looks removable. */
    uint16_t *dst= (uint16_t*)f->current_picture.data[0];
    const int stride= f->current_picture.linesize[0]>>1;
    const unsigned int bitstream_size= AV_RL32(buf);
    const int token_count av_unused = AV_RL32(buf + bitstream_size + 8);
    unsigned int prestream_size= 4*AV_RL32(buf + bitstream_size + 4);
    const uint8_t *prestream= buf + bitstream_size + 12;

    /* sanity-check the embedded sizes against the chunk length */
    if(prestream_size + bitstream_size + 12 != length
       || bitstream_size > (1<<26)
       || prestream_size > (1<<26)){
        av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d %d\n", prestream_size, bitstream_size, length);
        return -1;
    }

    /* the prestream starts with the Huffman tables; skip past them */
    prestream= read_huffman_tables(f, prestream);

    init_get_bits(&f->gb, buf + 4, 8*bitstream_size);

    /* remaining prestream bytes after the tables */
    prestream_size= length + buf - prestream;

    av_fast_malloc(&f->bitstream_buffer, &f->bitstream_buffer_size, prestream_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!f->bitstream_buffer)
        return AVERROR(ENOMEM);
    /* the token words are big-endian: byte-swap into scratch before bit-reading */
    f->dsp.bswap_buf(f->bitstream_buffer, (const uint32_t*)prestream, prestream_size/4);
    init_get_bits(&f->pre_gb, f->bitstream_buffer, 8*prestream_size);

    f->last_dc= 0*128*8*8;

    /* decode and inverse-transform one 16x16 macroblock at a time */
    for(y=0; y<height; y+=16){
        for(x=0; x<width; x+=16){
            if(decode_i_mb(f) < 0)
                return -1;

            idct_put(f, x, y);
        }
        dst += 16*stride;
    }

    /* the token stream must terminate with the end-of-stream symbol (256) */
    if(get_vlc2(&f->pre_gb, f->pre_vlc.table, ACDC_VLC_BITS, 3) != 256)
        av_log(f->avctx, AV_LOG_ERROR, "end mismatch\n");

    return 0;
}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ FourXContext * const f = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame *p, temp;
+ int i, frame_4cc, frame_size;
+
+ frame_4cc= AV_RL32(buf);
+ if(buf_size != AV_RL32(buf+4)+8 || buf_size < 20){
+ av_log(f->avctx, AV_LOG_ERROR, "size mismatch %d %d\n", buf_size, AV_RL32(buf+4));
+ }
+
+ if(frame_4cc == AV_RL32("cfrm")){
+ int free_index=-1;
+ const int data_size= buf_size - 20;
+ const int id= AV_RL32(buf+12);
+ const int whole_size= AV_RL32(buf+16);
+ CFrameBuffer *cfrm;
+
+ for(i=0; i<CFRAME_BUFFER_COUNT; i++){
+ if(f->cfrm[i].id && f->cfrm[i].id < avctx->frame_number)
+ av_log(f->avctx, AV_LOG_ERROR, "lost c frame %d\n", f->cfrm[i].id);
+ }
+
+ for(i=0; i<CFRAME_BUFFER_COUNT; i++){
+ if(f->cfrm[i].id == id) break;
+ if(f->cfrm[i].size == 0 ) free_index= i;
+ }
+
+ if(i>=CFRAME_BUFFER_COUNT){
+ i= free_index;
+ f->cfrm[i].id= id;
+ }
+ cfrm= &f->cfrm[i];
+
+ cfrm->data= av_fast_realloc(cfrm->data, &cfrm->allocated_size, cfrm->size + data_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if(!cfrm->data){ //explicit check needed as memcpy below might not catch a NULL
+ av_log(f->avctx, AV_LOG_ERROR, "realloc falure");
+ return -1;
+ }
+
+ memcpy(cfrm->data + cfrm->size, buf+20, data_size);
+ cfrm->size += data_size;
+
+ if(cfrm->size >= whole_size){
+ buf= cfrm->data;
+ frame_size= cfrm->size;
+
+ if(id != avctx->frame_number){
+ av_log(f->avctx, AV_LOG_ERROR, "cframe id mismatch %d %d\n", id, avctx->frame_number);
+ }
+
+ cfrm->size= cfrm->id= 0;
+ frame_4cc= AV_RL32("pfrm");
+ }else
+ return buf_size;
+ }else{
+ buf= buf + 12;
+ frame_size= buf_size - 12;
+ }
+
+ temp= f->current_picture;
+ f->current_picture= f->last_picture;
+ f->last_picture= temp;
+
+ p= &f->current_picture;
+ avctx->coded_frame= p;
+
+ avctx->flags |= CODEC_FLAG_EMU_EDGE; // alternatively we would have to use our own buffer management
+
+ if(p->data[0])
+ avctx->release_buffer(avctx, p);
+
+ p->reference= 1;
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ if(frame_4cc == AV_RL32("ifr2")){
+ p->pict_type= FF_I_TYPE;
+ if(decode_i2_frame(f, buf-4, frame_size) < 0)
+ return -1;
+ }else if(frame_4cc == AV_RL32("ifrm")){
+ p->pict_type= FF_I_TYPE;
+ if(decode_i_frame(f, buf, frame_size) < 0)
+ return -1;
+ }else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
+ if(!f->last_picture.data[0]){
+ f->last_picture.reference= 1;
+ if(avctx->get_buffer(avctx, &f->last_picture) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ }
+
+ p->pict_type= FF_P_TYPE;
+ if(decode_p_frame(f, buf, frame_size) < 0)
+ return -1;
+ }else if(frame_4cc == AV_RL32("snd_")){
+ av_log(avctx, AV_LOG_ERROR, "ignoring snd_ chunk length:%d\n", buf_size);
+ }else{
+ av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size);
+ }
+
+ p->key_frame= p->pict_type == FF_I_TYPE;
+
+ *picture= *p;
+ *data_size = sizeof(AVPicture);
+
+ emms_c();
+
+ return buf_size;
+}
+
+
+static av_cold void common_init(AVCodecContext *avctx){
+ FourXContext * const f = avctx->priv_data;
+
+ dsputil_init(&f->dsp, avctx);
+
+ f->avctx= avctx;
+}
+
+static av_cold int decode_init(AVCodecContext *avctx){
+ FourXContext * const f = avctx->priv_data;
+
+ if(avctx->extradata_size != 4 || !avctx->extradata) {
+ av_log(avctx, AV_LOG_ERROR, "extradata wrong or missing\n");
+ return 1;
+ }
+
+ f->version= AV_RL32(avctx->extradata)>>16;
+ common_init(avctx);
+ init_vlcs(f);
+
+ if(f->version>2) avctx->pix_fmt= PIX_FMT_RGB565;
+ else avctx->pix_fmt= PIX_FMT_BGR555;
+
+ return 0;
+}
+
+
+static av_cold int decode_end(AVCodecContext *avctx){
+ FourXContext * const f = avctx->priv_data;
+ int i;
+
+ av_freep(&f->bitstream_buffer);
+ f->bitstream_buffer_size=0;
+ for(i=0; i<CFRAME_BUFFER_COUNT; i++){
+ av_freep(&f->cfrm[i].data);
+ f->cfrm[i].allocated_size= 0;
+ }
+ free_vlc(&f->pre_vlc);
+ if(f->current_picture.data[0])
+ avctx->release_buffer(avctx, &f->current_picture);
+ if(f->last_picture.data[0])
+ avctx->release_buffer(avctx, &f->last_picture);
+
+ return 0;
+}
+
+AVCodec ff_fourxm_decoder = {
+ "4xm",
+ AVMEDIA_TYPE_VIDEO,
+ CODEC_ID_4XM,
+ sizeof(FourXContext),
+ decode_init,
+ NULL,
+ decode_end,
+ decode_frame,
+ CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("4X Movie"),
+};
+
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/8bps.c b/samples/rtsp_player/ffmpeg/libavcodec/8bps.c
new file mode 100755
index 0000000..d5f550f
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/8bps.c
@@ -0,0 +1,234 @@
+/*
+ * Quicktime Planar RGB (8BPS) Video Decoder
+ * Copyright (C) 2003 Roberto Togni
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * QT 8BPS Video Decoder by Roberto Togni
+ * For more information about the 8BPS format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ *
+ * Supports: PAL8 (RGB 8bpp, paletted)
+ * : BGR24 (RGB 24bpp) (can also output it as RGB32)
+ * : RGB32 (RGB 32bpp, 4th plane is probably alpha and it's ignored)
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+
+
/* get_format() candidate list for 24 bpp material: native BGR24 first,
 * RGB32 offered as an alternative output format. */
static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGB32, PIX_FMT_NONE};
+
+/*
+ * Decoder context
+ */
typedef struct EightBpsContext {

    AVCodecContext *avctx;      /* owning codec context */
    AVFrame pic;                /* decoded output picture */

    unsigned char planes;       /* number of coded planes (1, 3 or 4) */
    unsigned char planemap[4];  /* coded-plane index -> output channel offset */

    uint32_t pal[256];          /* current palette (PAL8 mode only) */
} EightBpsContext;
+
+
+/*
+ *
+ * Decode a frame
+ *
+ */
/**
 * Decode one 8BPS frame.
 *
 * Packet layout: per plane, `height` big-endian 16-bit row lengths, followed
 * by the PackBits-style RLE data for all rows. Planes are stored separately
 * and interleaved into the packed output via planemap[]/px_inc.
 *
 * @return buf_size on success (input always fully consumed), -1 on
 *         buffer allocation failure or truncated RLE data.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    EightBpsContext * const c = avctx->priv_data;
    const unsigned char *encoded = buf;
    unsigned char *pixptr, *pixptr_end;
    unsigned int height = avctx->height; // Real image height
    unsigned int dlen, p, row;
    const unsigned char *lp, *dp;
    unsigned char count;
    unsigned int px_inc;
    unsigned int planes = c->planes;
    unsigned char *planemap = c->planemap;

    if(c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    c->pic.reference = 0;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
    if(avctx->get_buffer(avctx, &c->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* Set data pointer after line lengths */
    dp = encoded + planes * (height << 1);

    /* Ignore alpha plane, don't know what to do with it */
    if (planes == 4)
        planes--;

    /* distance between consecutive samples of one plane in the packed
     * output; RGB32 output keeps a 4-byte stride even with 3 coded planes */
    px_inc = planes + (avctx->pix_fmt == PIX_FMT_RGB32);

    for (p = 0; p < planes; p++) {
        /* Lines length pointer for this plane */
        lp = encoded + p * (height << 1);

        /* Decode a plane */
        for(row = 0; row < height; row++) {
            /* planemap[p] offsets into the packed pixel for this channel */
            pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
            pixptr_end = pixptr + c->pic.linesize[0];
            dlen = av_be2ne16(*(const unsigned short *)(lp+row*2));
            /* Decode a row of this plane */
            while(dlen > 0) {
                /* opcode byte plus at least one data byte must remain */
                if(dp + 1 >= buf+buf_size) return -1;
                if ((count = *dp++) <= 127) {
                    /* literal run: opcode value n means n+1 raw bytes follow */
                    count++;
                    dlen -= count + 1;
                    if (pixptr + count * px_inc > pixptr_end)
                        break;
                    if(dp + count > buf+buf_size) return -1;
                    while(count--) {
                        *pixptr = *dp++;
                        pixptr += px_inc;
                    }
                } else {
                    /* replicate run: one byte repeated 257-opcode times */
                    count = 257 - count;
                    if (pixptr + count * px_inc > pixptr_end)
                        break;
                    while(count--) {
                        *pixptr = *dp;
                        pixptr += px_inc;
                    }
                    dp++;
                    dlen -= 2;
                }
            }
        }
    }

    if (avctx->bits_per_coded_sample <= 8) {
        /* pick up a palette change delivered as packet side data */
        const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                     AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            c->pic.palette_has_changed = 1;
            memcpy(c->pal, pal, AVPALETTE_SIZE);
        }

        memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
+
+
+/*
+ *
+ * Init 8BPS decoder
+ *
+ */
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ EightBpsContext * const c = avctx->priv_data;
+
+ c->avctx = avctx;
+
+ c->pic.data[0] = NULL;
+
+ switch (avctx->bits_per_coded_sample) {
+ case 8:
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ c->planes = 1;
+ c->planemap[0] = 0; // 1st plane is palette indexes
+ break;
+ case 24:
+ avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
+ c->planes = 3;
+ c->planemap[0] = 2; // 1st plane is red
+ c->planemap[1] = 1; // 2nd plane is green
+ c->planemap[2] = 0; // 3rd plane is blue
+ break;
+ case 32:
+ avctx->pix_fmt = PIX_FMT_RGB32;
+ c->planes = 4;
+#if HAVE_BIGENDIAN
+ c->planemap[0] = 1; // 1st plane is red
+ c->planemap[1] = 2; // 2nd plane is green
+ c->planemap[2] = 3; // 3rd plane is blue
+ c->planemap[3] = 0; // 4th plane is alpha???
+#else
+ c->planemap[0] = 2; // 1st plane is red
+ c->planemap[1] = 1; // 2nd plane is green
+ c->planemap[2] = 0; // 3rd plane is blue
+ c->planemap[3] = 3; // 4th plane is alpha???
+#endif
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Error: Unsupported color depth: %u.\n", avctx->bits_per_coded_sample);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+
+/*
+ *
+ * Uninit 8BPS decoder
+ *
+ */
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ EightBpsContext * const c = avctx->priv_data;
+
+ if (c->pic.data[0])
+ avctx->release_buffer(avctx, &c->pic);
+
+ return 0;
+}
+
+
+
+AVCodec ff_eightbps_decoder = {
+ "8bps",
+ AVMEDIA_TYPE_VIDEO,
+ CODEC_ID_8BPS,
+ sizeof(EightBpsContext),
+ decode_init,
+ NULL,
+ decode_end,
+ decode_frame,
+ CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime 8BPS video"),
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/8svx.c b/samples/rtsp_player/ffmpeg/libavcodec/8svx.c
new file mode 100755
index 0000000..3f7d3ef
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/8svx.c
@@ -0,0 +1,113 @@
+/*
+ * 8SVX audio decoder
+ * Copyright (C) 2008 Jaikrishnan Menon
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * 8svx audio decoder
+ * @author Jaikrishnan Menon
+ * supports: fibonacci delta encoding
+ * : exponential encoding
+ */
+
+#include "avcodec.h"
+
+/** decoder context */
typedef struct EightSvxContext {
    int16_t fib_acc;        /* running sample accumulator, seeded from the stream header */
    const int16_t *table;   /* delta table: fibonacci[] or exponential[] */
} EightSvxContext;
+
/* Delta tables, pre-scaled from 8-bit deltas to 16-bit sample range (<<8);
 * indexed by each 4-bit nibble of the input stream. */
static const int16_t fibonacci[16]   = { -34<<8, -21<<8, -13<<8,  -8<<8, -5<<8, -3<<8, -2<<8, -1<<8,
                                           0,      1<<8,   2<<8,   3<<8,  5<<8,  8<<8, 13<<8, 21<<8 };
static const int16_t exponential[16] = { -128<<8, -64<<8, -32<<8, -16<<8, -8<<8, -4<<8, -2<<8, -1<<8,
                                            0,      1<<8,   2<<8,   4<<8,  8<<8, 16<<8, 32<<8, 64<<8 };
+
+/** decode a frame */
+static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ EightSvxContext *esc = avctx->priv_data;
+ int16_t *out_data = data;
+ int consumed = buf_size;
+ const uint8_t *buf_end = buf + buf_size;
+
+ if((*data_size >> 2) < buf_size)
+ return -1;
+
+ if(avctx->frame_number == 0) {
+ esc->fib_acc = buf[1] << 8;
+ buf_size -= 2;
+ buf += 2;
+ }
+
+ *data_size = buf_size << 2;
+
+ while(buf < buf_end) {
+ uint8_t d = *buf++;
+ esc->fib_acc += esc->table[d & 0x0f];
+ *out_data++ = esc->fib_acc;
+ esc->fib_acc += esc->table[d >> 4];
+ *out_data++ = esc->fib_acc;
+ }
+
+ return consumed;
+}
+
+/** initialize 8svx decoder */
+static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
+{
+ EightSvxContext *esc = avctx->priv_data;
+
+ switch(avctx->codec->id) {
+ case CODEC_ID_8SVX_FIB:
+ esc->table = fibonacci;
+ break;
+ case CODEC_ID_8SVX_EXP:
+ esc->table = exponential;
+ break;
+ default:
+ return -1;
+ }
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ return 0;
+}
+
/* Registry entry: 8SVX, Fibonacci-delta variant. */
AVCodec ff_eightsvx_fib_decoder = {
  .name           = "8svx_fib",
  .type           = AVMEDIA_TYPE_AUDIO,
  .id             = CODEC_ID_8SVX_FIB,
  .priv_data_size = sizeof (EightSvxContext),
  .init           = eightsvx_decode_init,
  .decode         = eightsvx_decode_frame,
  .long_name      = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
};
+
/* Registry entry: 8SVX, exponential-delta variant. */
AVCodec ff_eightsvx_exp_decoder = {
  .name           = "8svx_exp",
  .type           = AVMEDIA_TYPE_AUDIO,
  .id             = CODEC_ID_8SVX_EXP,
  .priv_data_size = sizeof (EightSvxContext),
  .init           = eightsvx_decode_init,
  .decode         = eightsvx_decode_frame,
  .long_name      = NULL_IF_CONFIG_SMALL("8SVX exponential"),
};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/Makefile b/samples/rtsp_player/ffmpeg/libavcodec/Makefile
new file mode 100755
index 0000000..5a4a782
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/Makefile
@@ -0,0 +1,725 @@
+include $(SUBDIR)../config.mak
+
+NAME = avcodec
+FFLIBS = avutil
+
+HEADERS = avcodec.h avfft.h dxva2.h opt.h vaapi.h vdpau.h version.h xvmc.h
+
+OBJS = allcodecs.o \
+ avpacket.o \
+ bitstream.o \
+ bitstream_filter.o \
+ dsputil.o \
+ faanidct.o \
+ fmtconvert.o \
+ imgconvert.o \
+ jrevdct.o \
+ options.o \
+ parser.o \
+ raw.o \
+ resample.o \
+ resample2.o \
+ simple_idct.o \
+ utils.o \
+
+# parts needed for many different codecs
+OBJS-$(CONFIG_AANDCT) += aandcttab.o
+OBJS-$(CONFIG_AC3DSP) += ac3dsp.o
+OBJS-$(CONFIG_CRYSTALHD) += crystalhd.o
+OBJS-$(CONFIG_ENCODERS) += faandct.o jfdctfst.o jfdctint.o
+OBJS-$(CONFIG_DCT) += dct.o
+OBJS-$(CONFIG_DWT) += dwt.o
+OBJS-$(CONFIG_DXVA2) += dxva2.o
+FFT-OBJS-$(CONFIG_HARDCODED_TABLES) += cos_tables.o cos_fixed_tables.o
+OBJS-$(CONFIG_FFT) += avfft.o fft_fixed.o fft_float.o \
+ $(FFT-OBJS-yes)
+OBJS-$(CONFIG_GOLOMB) += golomb.o
+OBJS-$(CONFIG_H264DSP) += h264dsp.o h264idct.o
+OBJS-$(CONFIG_H264PRED) += h264pred.o
+OBJS-$(CONFIG_HUFFMAN) += huffman.o
+OBJS-$(CONFIG_LPC) += lpc.o
+OBJS-$(CONFIG_LSP) += lsp.o
+OBJS-$(CONFIG_MDCT) += mdct_fixed.o mdct_float.o
+RDFT-OBJS-$(CONFIG_HARDCODED_TABLES) += sin_tables.o
+OBJS-$(CONFIG_RDFT) += rdft.o $(RDFT-OBJS-yes)
+OBJS-$(CONFIG_SINEWIN) += sinewin.o
+OBJS-$(CONFIG_VAAPI) += vaapi.o
+OBJS-$(CONFIG_VDPAU) += vdpau.o
+
+# decoders/encoders/hardware accelerators
+OBJS-$(CONFIG_A64MULTI_ENCODER) += a64multienc.o elbg.o
+OBJS-$(CONFIG_A64MULTI5_ENCODER) += a64multienc.o elbg.o
+OBJS-$(CONFIG_AAC_DECODER) += aacdec.o aactab.o aacsbr.o aacps.o \
+ aacadtsdec.o mpeg4audio.o kbdwin.o
+OBJS-$(CONFIG_AAC_ENCODER) += aacenc.o aaccoder.o \
+ aacpsy.o aactab.o \
+ psymodel.o iirfilter.o \
+ mpeg4audio.o kbdwin.o
+OBJS-$(CONFIG_AASC_DECODER) += aasc.o msrledec.o
+OBJS-$(CONFIG_AC3_DECODER) += ac3dec.o ac3dec_data.o ac3.o kbdwin.o
+OBJS-$(CONFIG_AC3_ENCODER) += ac3enc_combined.o ac3enc_fixed.o ac3enc_float.o ac3tab.o ac3.o kbdwin.o
+OBJS-$(CONFIG_AC3_FLOAT_ENCODER) += ac3enc_float.o ac3tab.o ac3.o kbdwin.o
+OBJS-$(CONFIG_AC3_FIXED_ENCODER) += ac3enc_fixed.o ac3tab.o ac3.o
+OBJS-$(CONFIG_ALAC_DECODER) += alac.o
+OBJS-$(CONFIG_ALAC_ENCODER) += alacenc.o
+OBJS-$(CONFIG_ALS_DECODER) += alsdec.o bgmc.o mpeg4audio.o
+OBJS-$(CONFIG_AMRNB_DECODER) += amrnbdec.o celp_filters.o \
+ celp_math.o acelp_filters.o \
+ acelp_vectors.o \
+ acelp_pitch_delay.o
+OBJS-$(CONFIG_AMRWB_DECODER) += amrwbdec.o celp_filters.o \
+ celp_math.o acelp_filters.o \
+ acelp_vectors.o \
+ acelp_pitch_delay.o lsp.o
+OBJS-$(CONFIG_AMV_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_ANM_DECODER) += anm.o
+OBJS-$(CONFIG_ANSI_DECODER) += ansi.o cga_data.o
+OBJS-$(CONFIG_APE_DECODER) += apedec.o
+OBJS-$(CONFIG_ASS_DECODER) += assdec.o ass.o ass_split.o
+OBJS-$(CONFIG_ASS_ENCODER) += assenc.o ass.o
+OBJS-$(CONFIG_ASV1_DECODER) += asv1.o mpeg12data.o
+OBJS-$(CONFIG_ASV1_ENCODER) += asv1.o mpeg12data.o
+OBJS-$(CONFIG_ASV2_DECODER) += asv1.o mpeg12data.o
+OBJS-$(CONFIG_ASV2_ENCODER) += asv1.o mpeg12data.o
+OBJS-$(CONFIG_ATRAC1_DECODER) += atrac1.o atrac.o
+OBJS-$(CONFIG_ATRAC3_DECODER) += atrac3.o atrac.o
+OBJS-$(CONFIG_AURA_DECODER) += cyuv.o
+OBJS-$(CONFIG_AURA2_DECODER) += aura.o
+OBJS-$(CONFIG_AVS_DECODER) += avs.o
+OBJS-$(CONFIG_BETHSOFTVID_DECODER) += bethsoftvideo.o
+OBJS-$(CONFIG_BFI_DECODER) += bfi.o
+OBJS-$(CONFIG_BINK_DECODER) += bink.o binkidct.o
+OBJS-$(CONFIG_BINKAUDIO_DCT_DECODER) += binkaudio.o wma.o
+OBJS-$(CONFIG_BINKAUDIO_RDFT_DECODER) += binkaudio.o wma.o
+OBJS-$(CONFIG_BMP_DECODER) += bmp.o msrledec.o
+OBJS-$(CONFIG_BMP_ENCODER) += bmpenc.o
+OBJS-$(CONFIG_C93_DECODER) += c93.o
+OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
+ mpeg12data.o mpegvideo.o
+OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
+OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
+OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
+OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
+OBJS-$(CONFIG_COOK_DECODER) += cook.o
+OBJS-$(CONFIG_CSCD_DECODER) += cscd.o
+OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
+OBJS-$(CONFIG_DCA_DECODER) += dca.o synth_filter.o dcadsp.o
+OBJS-$(CONFIG_DFA_DECODER) += dfa.o
+OBJS-$(CONFIG_DNXHD_DECODER) += dnxhddec.o dnxhddata.o
+OBJS-$(CONFIG_DNXHD_ENCODER) += dnxhdenc.o dnxhddata.o \
+ mpegvideo_enc.o motion_est.o \
+ ratecontrol.o mpeg12data.o \
+ mpegvideo.o
+OBJS-$(CONFIG_DPX_DECODER) += dpx.o
+OBJS-$(CONFIG_DPX_ENCODER) += dpxenc.o
+OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinav.o
+OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinav.o
+OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
+OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
+OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
+OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
+OBJS-$(CONFIG_DVVIDEO_DECODER) += dv.o dvdata.o
+OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o dvdata.o
+OBJS-$(CONFIG_DXA_DECODER) += dxa.o
+OBJS-$(CONFIG_EAC3_DECODER) += eac3dec.o eac3dec_data.o
+OBJS-$(CONFIG_EACMV_DECODER) += eacmv.o
+OBJS-$(CONFIG_EAMAD_DECODER) += eamad.o eaidct.o mpeg12.o \
+ mpeg12data.o mpegvideo.o \
+ error_resilience.o
+OBJS-$(CONFIG_EATGQ_DECODER) += eatgq.o eaidct.o
+OBJS-$(CONFIG_EATGV_DECODER) += eatgv.o
+OBJS-$(CONFIG_EATQI_DECODER) += eatqi.o eaidct.o mpeg12.o \
+ mpeg12data.o mpegvideo.o \
+ error_resilience.o
+OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
+OBJS-$(CONFIG_EIGHTSVX_EXP_DECODER) += 8svx.o
+OBJS-$(CONFIG_EIGHTSVX_FIB_DECODER) += 8svx.o
+OBJS-$(CONFIG_ESCAPE124_DECODER) += escape124.o
+OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o rangecoder.o
+OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o rangecoder.o
+OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
+OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
+OBJS-$(CONFIG_FLAC_DECODER) += flacdec.o flacdata.o flac.o vorbis_data.o
+OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o flacdata.o flac.o
+OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
+OBJS-$(CONFIG_FLASHSV_ENCODER) += flashsvenc.o
+OBJS-$(CONFIG_FLIC_DECODER) += flicvideo.o
+OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
+OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
+OBJS-$(CONFIG_FRWU_DECODER) += frwu.o
+OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
+OBJS-$(CONFIG_GIF_ENCODER) += gif.o lzwenc.o
+OBJS-$(CONFIG_GSM_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
+OBJS-$(CONFIG_GSM_MS_DECODER) += gsmdec.o gsmdec_data.o msgsmdec.o
+OBJS-$(CONFIG_H261_DECODER) += h261dec.o h261.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_H261_ENCODER) += h261enc.o h261.o \
+ mpegvideo_enc.o motion_est.o \
+ ratecontrol.o mpeg12data.o \
+ mpegvideo.o
+OBJS-$(CONFIG_H263_DECODER) += h263dec.o h263.o ituh263dec.o \
+ mpeg4video.o mpeg4videodec.o flvdec.o\
+ intelh263dec.o mpegvideo.o \
+ error_resilience.o
+OBJS-$(CONFIG_H263_VAAPI_HWACCEL) += vaapi_mpeg4.o
+OBJS-$(CONFIG_H263_ENCODER) += mpegvideo_enc.o mpeg4video.o \
+ mpeg4videoenc.o motion_est.o \
+ ratecontrol.o h263.o ituh263enc.o \
+ flvenc.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_H264_DECODER) += h264.o h264_hl_motion.o \
+ h264_loopfilter.o h264_direct.o \
+ cabac.o h264_sei.o h264_ps.o \
+ h264_refs.o h264_cavlc.o h264_cabac.o\
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
+OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
+OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o
+OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o
+OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
+OBJS-$(CONFIG_IFF_BYTERUN1_DECODER) += iff.o
+OBJS-$(CONFIG_IFF_ILBM_DECODER) += iff.o
+OBJS-$(CONFIG_IMC_DECODER) += imc.o
+OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
+OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
+OBJS-$(CONFIG_INDEO5_DECODER) += indeo5.o ivi_common.o ivi_dsp.o
+OBJS-$(CONFIG_INTERPLAY_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
+OBJS-$(CONFIG_JPEGLS_DECODER) += jpeglsdec.o jpegls.o \
+ mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_JPEGLS_ENCODER) += jpeglsenc.o jpegls.o
+OBJS-$(CONFIG_JV_DECODER) += jvdec.o
+OBJS-$(CONFIG_KGV1_DECODER) += kgv1dec.o
+OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
+OBJS-$(CONFIG_LAGARITH_DECODER) += lagarith.o lagarithrac.o
+OBJS-$(CONFIG_LJPEG_ENCODER) += ljpegenc.o mjpegenc.o mjpeg.o \
+ mpegvideo_enc.o motion_est.o \
+ ratecontrol.o mpeg12data.o \
+ mpegvideo.o
+OBJS-$(CONFIG_LOCO_DECODER) += loco.o
+OBJS-$(CONFIG_MACE3_DECODER) += mace.o
+OBJS-$(CONFIG_MACE6_DECODER) += mace.o
+OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MIMIC_DECODER) += mimic.o
+OBJS-$(CONFIG_MJPEG_DECODER) += mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_MJPEG_ENCODER) += mjpegenc.o mjpeg.o \
+ mpegvideo_enc.o motion_est.o \
+ ratecontrol.o mpeg12data.o \
+ mpegvideo.o
+OBJS-$(CONFIG_MJPEGB_DECODER) += mjpegbdec.o mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_MLP_DECODER) += mlpdec.o mlpdsp.o
+OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
+OBJS-$(CONFIG_MOTIONPIXELS_DECODER) += motionpixels.o
+OBJS-$(CONFIG_MP1_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP1FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP2_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP2_ENCODER) += mpegaudioenc.o mpegaudio.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_MP2FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP3ADU_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP3ADUFLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o \
+ mpeg4audio.o
+OBJS-$(CONFIG_MP3ON4FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o \
+ mpeg4audio.o
+OBJS-$(CONFIG_MP3_DECODER) += mpegaudiodec.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MP3FLOAT_DECODER) += mpegaudiodec_float.o mpegaudiodecheader.o \
+ mpegaudio.o mpegaudiodata.o
+OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o mpegaudiodec.o \
+ mpegaudiodecheader.o mpegaudio.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o mpegaudiodec.o \
+ mpegaudiodecheader.o mpegaudio.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG_XVMC_DECODER) += mpegvideo_xvmc.o
+OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpegvideo_enc.o \
+ motion_est.o ratecontrol.o \
+ mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
+OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
+OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpegvideo_enc.o \
+ motion_est.o ratecontrol.o \
+ mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG4_VAAPI_HWACCEL) += vaapi_mpeg4.o
+OBJS-$(CONFIG_MSMPEG4V1_DECODER) += msmpeg4.o msmpeg4data.o
+OBJS-$(CONFIG_MSMPEG4V1_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
+ h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V2_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
+ h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V2_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
+ h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V3_DECODER) += msmpeg4.o msmpeg4data.o h263dec.o \
+ h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSMPEG4V3_ENCODER) += msmpeg4.o msmpeg4data.o h263dec.o \
+ h263.o ituh263dec.o mpeg4videodec.o
+OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o msrledec.o
+OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
+OBJS-$(CONFIG_MSZH_DECODER) += lcldec.o
+OBJS-$(CONFIG_NELLYMOSER_DECODER) += nellymoserdec.o nellymoser.o
+OBJS-$(CONFIG_NELLYMOSER_ENCODER) += nellymoserenc.o nellymoser.o
+OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o
+OBJS-$(CONFIG_PAM_DECODER) += pnmdec.o pnm.o
+OBJS-$(CONFIG_PAM_ENCODER) += pamenc.o pnm.o
+OBJS-$(CONFIG_PBM_DECODER) += pnmdec.o pnm.o
+OBJS-$(CONFIG_PBM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PCX_DECODER) += pcx.o
+OBJS-$(CONFIG_PCX_ENCODER) += pcxenc.o
+OBJS-$(CONFIG_PGM_DECODER) += pnmdec.o pnm.o
+OBJS-$(CONFIG_PGM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PGMYUV_DECODER) += pnmdec.o pnm.o
+OBJS-$(CONFIG_PGMYUV_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PGSSUB_DECODER) += pgssubdec.o
+OBJS-$(CONFIG_PICTOR_DECODER) += pictordec.o cga_data.o
+OBJS-$(CONFIG_PNG_DECODER) += png.o pngdec.o
+OBJS-$(CONFIG_PNG_ENCODER) += png.o pngenc.o
+OBJS-$(CONFIG_PPM_DECODER) += pnmdec.o pnm.o
+OBJS-$(CONFIG_PPM_ENCODER) += pnmenc.o pnm.o
+OBJS-$(CONFIG_PTX_DECODER) += ptx.o
+OBJS-$(CONFIG_QCELP_DECODER) += qcelpdec.o celp_math.o \
+ celp_filters.o acelp_vectors.o \
+ acelp_filters.o
+OBJS-$(CONFIG_QDM2_DECODER) += qdm2.o mpegaudiodec.o \
+ mpegaudiodecheader.o mpegaudio.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_QDRAW_DECODER) += qdrw.o
+OBJS-$(CONFIG_QPEG_DECODER) += qpeg.o
+OBJS-$(CONFIG_QTRLE_DECODER) += qtrle.o
+OBJS-$(CONFIG_QTRLE_ENCODER) += qtrleenc.o
+OBJS-$(CONFIG_R10K_DECODER) += r210dec.o
+OBJS-$(CONFIG_R210_DECODER) += r210dec.o
+OBJS-$(CONFIG_RA_144_DECODER) += ra144dec.o ra144.o celp_filters.o
+OBJS-$(CONFIG_RA_144_ENCODER) += ra144enc.o ra144.o celp_filters.o
+OBJS-$(CONFIG_RA_288_DECODER) += ra288.o celp_math.o celp_filters.o
+OBJS-$(CONFIG_RAWVIDEO_DECODER) += rawdec.o
+OBJS-$(CONFIG_RAWVIDEO_ENCODER) += rawenc.o
+OBJS-$(CONFIG_RL2_DECODER) += rl2.o
+OBJS-$(CONFIG_ROQ_DECODER) += roqvideodec.o roqvideo.o
+OBJS-$(CONFIG_ROQ_ENCODER) += roqvideoenc.o roqvideo.o elbg.o
+OBJS-$(CONFIG_ROQ_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_ROQ_DPCM_ENCODER) += roqaudioenc.o
+OBJS-$(CONFIG_RPZA_DECODER) += rpza.o
+OBJS-$(CONFIG_RV10_DECODER) += rv10.o
+OBJS-$(CONFIG_RV10_ENCODER) += rv10enc.o
+OBJS-$(CONFIG_RV20_DECODER) += rv10.o
+OBJS-$(CONFIG_RV20_ENCODER) += rv20enc.o
+OBJS-$(CONFIG_RV30_DECODER) += rv30.o rv34.o rv30dsp.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_RV40_DECODER) += rv40.o rv34.o rv40dsp.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_SGI_DECODER) += sgidec.o
+OBJS-$(CONFIG_SGI_ENCODER) += sgienc.o rle.o
+OBJS-$(CONFIG_SHORTEN_DECODER) += shorten.o
+OBJS-$(CONFIG_SIPR_DECODER) += sipr.o acelp_pitch_delay.o \
+ celp_math.o acelp_vectors.o \
+ acelp_filters.o celp_filters.o \
+ sipr16k.o
+OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
+OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
+OBJS-$(CONFIG_SMC_DECODER) += smc.o
+OBJS-$(CONFIG_SNOW_DECODER) += snow.o rangecoder.o
+OBJS-$(CONFIG_SNOW_ENCODER) += snow.o rangecoder.o motion_est.o \
+ ratecontrol.o h263.o \
+ mpegvideo.o error_resilience.o \
+ ituh263enc.o mpegvideo_enc.o \
+ mpeg12data.o
+OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
+OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
+OBJS-$(CONFIG_SONIC_LS_ENCODER) += sonic.o
+OBJS-$(CONFIG_SP5X_DECODER) += sp5xdec.o mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_SRT_DECODER) += srtdec.o ass.o
+OBJS-$(CONFIG_SRT_ENCODER) += srtenc.o ass_split.o
+OBJS-$(CONFIG_SUNRAST_DECODER) += sunrast.o
+OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o h263.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o \
+ motion_est.o h263.o \
+ mpegvideo.o error_resilience.o \
+ ituh263enc.o mpegvideo_enc.o \
+ ratecontrol.o mpeg12data.o
+OBJS-$(CONFIG_SVQ3_DECODER) += h264.o svq3.o h264_hl_motion.o \
+ h264_loopfilter.o h264_direct.o \
+ h264_sei.o h264_ps.o h264_refs.o \
+ h264_cavlc.o h264_cabac.o cabac.o \
+ mpegvideo.o error_resilience.o \
+ svq1dec.o svq1.o h263.o
+OBJS-$(CONFIG_TARGA_DECODER) += targa.o
+OBJS-$(CONFIG_TARGA_ENCODER) += targaenc.o rle.o
+OBJS-$(CONFIG_THEORA_DECODER) += xiph.o
+OBJS-$(CONFIG_THP_DECODER) += mjpegdec.o mjpeg.o
+OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
+OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o faxcompr.o
+OBJS-$(CONFIG_TIFF_ENCODER) += tiffenc.o rle.o lzwenc.o
+OBJS-$(CONFIG_TMV_DECODER) += tmv.o cga_data.o
+OBJS-$(CONFIG_TRUEMOTION1_DECODER) += truemotion1.o
+OBJS-$(CONFIG_TRUEMOTION2_DECODER) += truemotion2.o
+OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
+OBJS-$(CONFIG_TSCC_DECODER) += tscc.o msrledec.o
+OBJS-$(CONFIG_TTA_DECODER) += tta.o
+OBJS-$(CONFIG_TWINVQ_DECODER) += twinvq.o celp_math.o
+OBJS-$(CONFIG_TXD_DECODER) += txd.o s3tc.o
+OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
+OBJS-$(CONFIG_V210_DECODER) += v210dec.o
+OBJS-$(CONFIG_V210_ENCODER) += v210enc.o
+OBJS-$(CONFIG_V210X_DECODER) += v210x.o
+OBJS-$(CONFIG_VB_DECODER) += vb.o
+OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1.o vc1data.o vc1dsp.o \
+ msmpeg4.o msmpeg4data.o \
+ intrax8.o intrax8dsp.o
+OBJS-$(CONFIG_VC1_DXVA2_HWACCEL) += dxva2_vc1.o
+OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
+OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
+OBJS-$(CONFIG_VCR1_ENCODER) += vcr1.o
+OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
+OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
+OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
+OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbis.o \
+ vorbis_data.o xiph.o
+OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
+ vorbis_data.o
+OBJS-$(CONFIG_VP3_DECODER) += vp3.o vp3dsp.o
+OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56dsp.o \
+ vp3dsp.o vp56rac.o
+OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o vp56dsp.o \
+ vp3dsp.o vp6dsp.o vp56rac.o
+OBJS-$(CONFIG_VP8_DECODER) += vp8.o vp8dsp.o vp56rac.o
+OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
+OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
+OBJS-$(CONFIG_WMAPRO_DECODER) += wmaprodec.o wma.o
+OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o wma.o aactab.o
+OBJS-$(CONFIG_WMAV1_ENCODER) += wmaenc.o wma.o aactab.o
+OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o wma.o aactab.o
+OBJS-$(CONFIG_WMAV2_ENCODER) += wmaenc.o wma.o aactab.o
+OBJS-$(CONFIG_WMAVOICE_DECODER) += wmavoice.o \
+ celp_math.o celp_filters.o \
+ acelp_vectors.o acelp_filters.o
+OBJS-$(CONFIG_WMV1_DECODER) += msmpeg4.o msmpeg4data.o
+OBJS-$(CONFIG_WMV2_DECODER) += wmv2dec.o wmv2.o \
+ msmpeg4.o msmpeg4data.o \
+ intrax8.o intrax8dsp.o
+OBJS-$(CONFIG_WMV2_ENCODER) += wmv2enc.o wmv2.o \
+ msmpeg4.o msmpeg4data.o \
+ mpeg4videodec.o ituh263dec.o h263dec.o
+OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
+OBJS-$(CONFIG_WS_SND1_DECODER) += ws-snd1.o
+OBJS-$(CONFIG_XAN_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_XAN_WC3_DECODER) += xan.o
+OBJS-$(CONFIG_XAN_WC4_DECODER) += xxan.o
+OBJS-$(CONFIG_XL_DECODER) += xl.o
+OBJS-$(CONFIG_XSUB_DECODER) += xsubdec.o
+OBJS-$(CONFIG_XSUB_ENCODER) += xsubenc.o
+OBJS-$(CONFIG_YOP_DECODER) += yop.o
+OBJS-$(CONFIG_ZLIB_DECODER) += lcldec.o
+OBJS-$(CONFIG_ZLIB_ENCODER) += lclenc.o
+OBJS-$(CONFIG_ZMBV_DECODER) += zmbv.o
+OBJS-$(CONFIG_ZMBV_ENCODER) += zmbvenc.o
+
+# (AD)PCM decoders/encoders
+OBJS-$(CONFIG_PCM_ALAW_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_ALAW_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_BLURAY_DECODER) += pcm-mpeg.o
+OBJS-$(CONFIG_PCM_DVD_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_DVD_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_F32BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_F32BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_F32LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_F32LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_F64BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_F64BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_F64LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_F64LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_LXF_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_MULAW_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_MULAW_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S8_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S8_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16LE_PLANAR_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24DAUD_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24DAUD_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U8_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U8_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_ZORK_ENCODER) += pcm.o
+
+OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adxdec.o
+OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adxenc.o
+OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_MAXIS_XA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_R1_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_R2_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_R3_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_XAS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_G722_DECODER) += g722.o
+OBJS-$(CONFIG_ADPCM_G722_ENCODER) += g722.o
+OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
+OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
+OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_EA_EACS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_EA_SEAD_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_ISS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_THP_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcm.o
+
+# libavformat dependencies
+OBJS-$(CONFIG_ADTS_MUXER) += mpeg4audio.o
+OBJS-$(CONFIG_CAF_DEMUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_DV_DEMUXER) += dvdata.o
+OBJS-$(CONFIG_DV_MUXER) += dvdata.o
+OBJS-$(CONFIG_FLAC_DEMUXER) += flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_FLAC_MUXER) += flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_FLV_DEMUXER) += mpeg4audio.o
+OBJS-$(CONFIG_GXF_DEMUXER) += mpeg12data.o
+OBJS-$(CONFIG_IFF_DEMUXER) += iff.o
+OBJS-$(CONFIG_MATROSKA_AUDIO_MUXER) += xiph.o mpeg4audio.o \
+ flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_MATROSKA_DEMUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_MATROSKA_MUXER) += xiph.o mpeg4audio.o \
+ flacdec.o flacdata.o flac.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_MOV_DEMUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_MOV_MUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_MPEGTS_MUXER) += mpegvideo.o mpeg4audio.o
+OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpeg4audio.o mpegaudiodata.o
+OBJS-$(CONFIG_NUT_MUXER) += mpegaudiodata.o
+OBJS-$(CONFIG_OGG_DEMUXER) += flacdec.o flacdata.o flac.o \
+ dirac.o mpeg12data.o
+OBJS-$(CONFIG_OGG_MUXER) += xiph.o flacdec.o flacdata.o flac.o
+OBJS-$(CONFIG_RTP_MUXER) += mpegvideo.o xiph.o
+OBJS-$(CONFIG_SPDIF_DEMUXER) += aacadtsdec.o mpeg4audio.o
+OBJS-$(CONFIG_WEBM_MUXER) += xiph.o mpeg4audio.o \
+ flacdec.o flacdata.o flac.o \
+ mpegaudiodata.o
+
+# external codec libraries
+OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o
+OBJS-$(CONFIG_LIBDIRAC_DECODER) += libdiracdec.o
+OBJS-$(CONFIG_LIBDIRAC_ENCODER) += libdiracenc.o libdirac_libschro.o
+OBJS-$(CONFIG_LIBFAAC_ENCODER) += libfaac.o
+OBJS-$(CONFIG_LIBGSM_DECODER) += libgsm.o
+OBJS-$(CONFIG_LIBGSM_ENCODER) += libgsm.o
+OBJS-$(CONFIG_LIBGSM_MS_DECODER) += libgsm.o
+OBJS-$(CONFIG_LIBGSM_MS_ENCODER) += libgsm.o
+OBJS-$(CONFIG_LIBMP3LAME_ENCODER) += libmp3lame.o
+OBJS-$(CONFIG_LIBOPENCORE_AMRNB_DECODER) += libopencore-amr.o
+OBJS-$(CONFIG_LIBOPENCORE_AMRNB_ENCODER) += libopencore-amr.o
+OBJS-$(CONFIG_LIBOPENCORE_AMRWB_DECODER) += libopencore-amr.o
+OBJS-$(CONFIG_LIBOPENJPEG_DECODER) += libopenjpeg.o
+OBJS-$(CONFIG_LIBSCHROEDINGER_DECODER) += libschroedingerdec.o \
+ libschroedinger.o \
+ libdirac_libschro.o
+OBJS-$(CONFIG_LIBSCHROEDINGER_ENCODER) += libschroedingerenc.o \
+ libschroedinger.o \
+ libdirac_libschro.o
+OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
+OBJS-$(CONFIG_LIBTHEORA_ENCODER) += libtheoraenc.o
+OBJS-$(CONFIG_LIBVO_AACENC_ENCODER) += libvo-aacenc.o mpeg4audio.o
+OBJS-$(CONFIG_LIBVO_AMRWBENC_ENCODER) += libvo-amrwbenc.o
+OBJS-$(CONFIG_LIBVORBIS_ENCODER) += libvorbis.o vorbis_data.o
+OBJS-$(CONFIG_LIBVPX_DECODER) += libvpxdec.o
+OBJS-$(CONFIG_LIBVPX_ENCODER) += libvpxenc.o
+OBJS-$(CONFIG_LIBX264_ENCODER) += libx264.o
+OBJS-$(CONFIG_LIBXAVS_ENCODER) += libxavs.o
+OBJS-$(CONFIG_LIBXVID) += libxvidff.o libxvid_rc.o
+
+# parsers
+OBJS-$(CONFIG_AAC_PARSER) += aac_parser.o aac_ac3_parser.o \
+ aacadtsdec.o mpeg4audio.o
+OBJS-$(CONFIG_AC3_PARSER) += ac3_parser.o ac3tab.o \
+ aac_ac3_parser.o
+OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs_parser.o
+OBJS-$(CONFIG_DCA_PARSER) += dca_parser.o
+OBJS-$(CONFIG_DIRAC_PARSER) += dirac_parser.o
+OBJS-$(CONFIG_DNXHD_PARSER) += dnxhd_parser.o
+OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsub_parser.o
+OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsub_parser.o
+OBJS-$(CONFIG_FLAC_PARSER) += flac_parser.o flacdata.o flac.o
+OBJS-$(CONFIG_H261_PARSER) += h261_parser.o
+OBJS-$(CONFIG_H263_PARSER) += h263_parser.o
+OBJS-$(CONFIG_H264_PARSER) += h264_parser.o h264.o h264_hl_motion.o \
+ cabac.o \
+ h264_refs.o h264_sei.o h264_direct.o \
+ h264_loopfilter.o h264_cabac.o \
+ h264_cavlc.o h264_ps.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_AAC_LATM_PARSER) += latm_parser.o
+OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg_parser.o
+OBJS-$(CONFIG_MLP_PARSER) += mlp_parser.o mlp.o
+OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += mpeg4video_parser.o h263.o \
+ mpegvideo.o error_resilience.o \
+ mpeg4videodec.o mpeg4video.o \
+ ituh263dec.o h263dec.o
+OBJS-$(CONFIG_MPEGAUDIO_PARSER) += mpegaudio_parser.o \
+ mpegaudiodecheader.o mpegaudiodata.o
+OBJS-$(CONFIG_MPEGVIDEO_PARSER) += mpegvideo_parser.o \
+ mpeg12.o mpeg12data.o \
+ mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_PNM_PARSER) += pnm_parser.o pnm.o
+OBJS-$(CONFIG_VC1_PARSER) += vc1_parser.o vc1.o vc1data.o \
+ msmpeg4.o msmpeg4data.o mpeg4video.o \
+ h263.o mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_VP3_PARSER) += vp3_parser.o
+OBJS-$(CONFIG_VP8_PARSER) += vp8_parser.o
+
+# bitstream filters
+OBJS-$(CONFIG_AAC_ADTSTOASC_BSF) += aac_adtstoasc_bsf.o aacadtsdec.o \
+ mpeg4audio.o
+OBJS-$(CONFIG_CHOMP_BSF) += chomp_bsf.o
+OBJS-$(CONFIG_DUMP_EXTRADATA_BSF) += dump_extradata_bsf.o
+OBJS-$(CONFIG_H264_MP4TOANNEXB_BSF) += h264_mp4toannexb_bsf.o
+OBJS-$(CONFIG_IMX_DUMP_HEADER_BSF) += imx_dump_header_bsf.o
+OBJS-$(CONFIG_MJPEG2JPEG_BSF) += mjpeg2jpeg_bsf.o mjpeg.o
+OBJS-$(CONFIG_MJPEGA_DUMP_HEADER_BSF) += mjpega_dump_header_bsf.o
+OBJS-$(CONFIG_MOV2TEXTSUB_BSF) += movsub_bsf.o
+OBJS-$(CONFIG_MP3_HEADER_COMPRESS_BSF) += mp3_header_compress_bsf.o
+OBJS-$(CONFIG_MP3_HEADER_DECOMPRESS_BSF) += mp3_header_decompress_bsf.o \
+ mpegaudiodata.o
+OBJS-$(CONFIG_NOISE_BSF) += noise_bsf.o
+OBJS-$(CONFIG_REMOVE_EXTRADATA_BSF) += remove_extradata_bsf.o
+OBJS-$(CONFIG_TEXT2MOVSUB_BSF) += movsub_bsf.o
+
+# thread libraries
+OBJS-$(HAVE_PTHREADS) += pthread.o
+OBJS-$(HAVE_W32THREADS) += w32thread.o
+
+OBJS-$(CONFIG_MLIB)                    += mlib/dsputil_mlib.o
+
+# inverse.o contains the ff_inverse table definition, which is used by
+# the FASTDIV macro (from libavutil); since referencing the external
+# table has a negative effect on performance, copy it in libavcodec as
+# well.
+OBJS-$(!CONFIG_SMALL) += inverse.o
+
+-include $(SUBDIR)$(ARCH)/Makefile
+
+SKIPHEADERS = %_tablegen.h
+SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
+SKIPHEADERS-$(CONFIG_LIBDIRAC) += libdirac.h
+SKIPHEADERS-$(CONFIG_LIBSCHROEDINGER) += libschroedinger.h
+SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_internal.h
+SKIPHEADERS-$(CONFIG_VDPAU) += vdpau.h
+SKIPHEADERS-$(CONFIG_XVMC) += xvmc.h
+SKIPHEADERS += mpegaudio3.h
+
+EXAMPLES = api
+
+TESTPROGS = cabac dct eval fft fft-fixed h264 iirfilter rangecoder snow
+TESTPROGS-$(HAVE_MMX) += motion
+TESTOBJS = dctref.o
+
+HOSTPROGS = costablegen
+
+DIRS = alpha arm bfin mlib ppc ps2 sh4 sparc x86
+
+CLEANFILES = sin_tables.c cos_tables.c *_tables.h *_tablegen$(HOSTEXESUF)
+
+include $(SUBDIR)../subdir.mak
+
+$(SUBDIR)dct-test$(EXESUF): $(SUBDIR)dctref.o
+
+$(SUBDIR)cos_tables.c: $(SUBDIR)costablegen$(HOSTEXESUF)
+ $(M)./$< > $@
+
+$(SUBDIR)cos_fixed_tables.c: $(SUBDIR)costablegen$(HOSTEXESUF)
+ $(M)./$< cos fixed > $@
+
+$(SUBDIR)sin_tables.c: $(SUBDIR)costablegen$(HOSTEXESUF)
+ $(M)./$< sin > $@
+
+ifdef CONFIG_MPEGAUDIO_HP
+$(SUBDIR)mpegaudio_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DFRAC_BITS=23
+$(SUBDIR)mpegaudio_tablegen.ho: CPPFLAGS += -DFRAC_BITS=23
+else
+$(SUBDIR)mpegaudio_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DFRAC_BITS=15
+$(SUBDIR)mpegaudio_tablegen.ho: CPPFLAGS += -DFRAC_BITS=15
+endif
+
+ifdef CONFIG_SMALL
+$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=1
+else
+$(SUBDIR)%_tablegen$(HOSTEXESUF): HOSTCFLAGS += -DCONFIG_SMALL=0
+endif
+
+$(SUBDIR)%_tablegen$(HOSTEXESUF): $(SUBDIR)%_tablegen.c $(SUBDIR)%_tablegen.h $(SUBDIR)tableprint.c
+ $(HOSTCC) $(HOSTCFLAGS) $(HOSTLDFLAGS) -o $@ $(filter %.c,$^) $(HOSTLIBS)
+
+GEN_HEADERS = cbrt_tables.h aacps_tables.h aac_tables.h dv_tables.h \
+ sinewin_tables.h mpegaudio_tables.h motionpixels_tables.h \
+ pcm_tables.h qdm2_tables.h
+GEN_HEADERS := $(addprefix $(SUBDIR), $(GEN_HEADERS))
+
+$(GEN_HEADERS): $(SUBDIR)%_tables.h: $(SUBDIR)%_tablegen$(HOSTEXESUF)
+ $(M)./$< > $@
+
+ifdef CONFIG_HARDCODED_TABLES
+$(SUBDIR)aacdec.o: $(SUBDIR)cbrt_tables.h
+$(SUBDIR)aacps.o: $(SUBDIR)aacps_tables.h
+$(SUBDIR)aactab.o: $(SUBDIR)aac_tables.h
+$(SUBDIR)dv.o: $(SUBDIR)dv_tables.h
+$(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h
+$(SUBDIR)mpegaudiodec.o: $(SUBDIR)mpegaudio_tables.h
+$(SUBDIR)mpegaudiodec_float.o: $(SUBDIR)mpegaudio_tables.h
+$(SUBDIR)motionpixels.o: $(SUBDIR)motionpixels_tables.h
+$(SUBDIR)pcm.o: $(SUBDIR)pcm_tables.h
+$(SUBDIR)qdm2.o: $(SUBDIR)qdm2_tables.h
+endif
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/a64colors.h b/samples/rtsp_player/ffmpeg/libavcodec/a64colors.h
new file mode 100755
index 0000000..a9cdb6f
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/a64colors.h
@@ -0,0 +1,52 @@
+/*
+ * a64 video encoder - c64 colors in rgb (Pepto)
+ * Copyright (c) 2009 Tobias Bindhammer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a64 video encoder - c64 colors in rgb
+ */
+
+#ifndef AVCODEC_A64COLORS_H
+#define AVCODEC_A64COLORS_H
+
+#include <stdint.h>
+
+/* c64 palette in RGB */
+static const uint8_t a64_palette[16][3] = {
+ {0x00, 0x00, 0x00},
+ {0xff, 0xff, 0xff},
+ {0x68, 0x37, 0x2b},
+ {0x70, 0xa4, 0xb2},
+ {0x6f, 0x3d, 0x86},
+ {0x58, 0x8d, 0x43},
+ {0x35, 0x28, 0x79},
+ {0xb8, 0xc7, 0x6f},
+ {0x6f, 0x4f, 0x25},
+ {0x43, 0x39, 0x00},
+ {0x9a, 0x67, 0x59},
+ {0x44, 0x44, 0x44},
+ {0x6c, 0x6c, 0x6c},
+ {0x9a, 0xd2, 0x84},
+ {0x6c, 0x5e, 0xb5},
+ {0x95, 0x95, 0x95},
+};
+
+#endif /* AVCODEC_A64COLORS_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/a64enc.h b/samples/rtsp_player/ffmpeg/libavcodec/a64enc.h
new file mode 100755
index 0000000..fb559ba
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/a64enc.h
@@ -0,0 +1,55 @@
+/*
+ * a64 video encoder - basic headers
+ * Copyright (c) 2009 Tobias Bindhammer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a64 video encoder - basic headers
+ */
+
+#ifndef AVCODEC_A64ENC_H
+#define AVCODEC_A64ENC_H
+
+#include "libavutil/lfg.h"
+#include "avcodec.h"
+
+#define C64XRES 320
+#define C64YRES 200
+
+typedef struct A64Context {
+ /* general variables */
+ AVFrame picture;
+
+ /* variables for multicolor modes */
+ AVLFG randctx;
+ int mc_lifetime;
+ int mc_use_5col;
+ int mc_frame_counter;
+ int *mc_meta_charset;
+ int *mc_charmap;
+ int *mc_best_cb;
+ int mc_luma_vals[5];
+ uint8_t *mc_charset;
+ uint8_t *mc_colram;
+ uint8_t *mc_palette;
+ int mc_pal_size;
+} A64Context;
+
+#endif /* AVCODEC_A64ENC_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/a64multienc.c b/samples/rtsp_player/ffmpeg/libavcodec/a64multienc.c
new file mode 100755
index 0000000..aed28ad
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/a64multienc.c
@@ -0,0 +1,389 @@
+/*
+ * a64 video encoder - multicolor modes
+ * Copyright (c) 2009 Tobias Bindhammer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a64 video encoder - multicolor modes
+ */
+
+#include "a64enc.h"
+#include "a64colors.h"
+#include "a64tables.h"
+#include "elbg.h"
+#include "libavutil/intreadwrite.h"
+
+#define DITHERSTEPS 8
+#define CHARSET_CHARS 256
+#define INTERLACED 1
+#define CROP_SCREENS 1
+
+/* gray gradient */
+static const int mc_colors[5]={0x0,0xb,0xc,0xf,0x1};
+
+/* other possible gradients - to be tested */
+//static const int mc_colors[5]={0x0,0x8,0xa,0xf,0x7};
+//static const int mc_colors[5]={0x0,0x9,0x8,0xa,0x3};
+
+static void to_meta_with_crop(AVCodecContext *avctx, AVFrame *p, int *dest)
+{
+ int blockx, blocky, x, y;
+ int luma = 0;
+ int height = FFMIN(avctx->height, C64YRES);
+ int width = FFMIN(avctx->width , C64XRES);
+ uint8_t *src = p->data[0];
+
+ for (blocky = 0; blocky < C64YRES; blocky += 8) {
+ for (blockx = 0; blockx < C64XRES; blockx += 8) {
+ for (y = blocky; y < blocky + 8 && y < C64YRES; y++) {
+ for (x = blockx; x < blockx + 8 && x < C64XRES; x += 2) {
+ if(x < width && y < height) {
+ /* build average over 2 pixels */
+ luma = (src[(x + 0 + y * p->linesize[0])] +
+ src[(x + 1 + y * p->linesize[0])]) / 2;
+ /* write blocks as linear data now so they are suitable for elbg */
+ dest[0] = luma;
+ }
+ dest++;
+ }
+ }
+ }
+ }
+}
+
+static void render_charset(AVCodecContext *avctx, uint8_t *charset,
+ uint8_t *colrammap)
+{
+ A64Context *c = avctx->priv_data;
+ uint8_t row1, row2;
+ int charpos, x, y;
+ int a, b;
+ uint8_t pix;
+ int lowdiff, highdiff;
+ int *best_cb = c->mc_best_cb;
+ static uint8_t index1[256];
+ static uint8_t index2[256];
+ static uint8_t dither[256];
+ int i;
+ int distance;
+
+ /* generate lookup-tables for dither and index before looping */
+ i = 0;
+ for (a=0; a < 256; a++) {
+ if(i < c->mc_pal_size -1 && a == c->mc_luma_vals[i + 1]) {
+ distance = c->mc_luma_vals[i + 1] - c->mc_luma_vals[i];
+ for(b = 0; b <= distance; b++) {
+ dither[c->mc_luma_vals[i] + b] = b * (DITHERSTEPS - 1) / distance;
+ }
+ i++;
+ }
+ if(i >= c->mc_pal_size - 1) dither[a] = 0;
+ index1[a] = i;
+ index2[a] = FFMIN(i + 1, c->mc_pal_size - 1);
+ }
+
+ /* and render charset */
+ for (charpos = 0; charpos < CHARSET_CHARS; charpos++) {
+ lowdiff = 0;
+ highdiff = 0;
+ for (y = 0; y < 8; y++) {
+ row1 = 0; row2 = 0;
+ for (x = 0; x < 4; x++) {
+ pix = best_cb[y * 4 + x];
+
+ /* accumulate error for brightest/darkest color */
+ if (index1[pix] >= 3)
+ highdiff += pix - c->mc_luma_vals[3];
+ if (index1[pix] < 1)
+ lowdiff += c->mc_luma_vals[1] - pix;
+
+ row1 <<= 2;
+
+ if (INTERLACED) {
+ row2 <<= 2;
+ if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 0][x & 3])
+ row1 |= 3-(index2[pix] & 3);
+ else
+ row1 |= 3-(index1[pix] & 3);
+
+ if (interlaced_dither_patterns[dither[pix]][(y & 3) * 2 + 1][x & 3])
+ row2 |= 3-(index2[pix] & 3);
+ else
+ row2 |= 3-(index1[pix] & 3);
+ }
+ else {
+ if (multi_dither_patterns[dither[pix]][(y & 3)][x & 3])
+ row1 |= 3-(index2[pix] & 3);
+ else
+ row1 |= 3-(index1[pix] & 3);
+ }
+ }
+ charset[y+0x000] = row1;
+ if (INTERLACED) charset[y+0x800] = row2;
+ }
+ /* do we need to adjust pixels? */
+ if (highdiff > 0 && lowdiff > 0 && c->mc_use_5col) {
+ if (lowdiff > highdiff) {
+ for (x = 0; x < 32; x++)
+ best_cb[x] = FFMIN(c->mc_luma_vals[3], best_cb[x]);
+ } else {
+ for (x = 0; x < 32; x++)
+ best_cb[x] = FFMAX(c->mc_luma_vals[1], best_cb[x]);
+ }
+ charpos--; /* redo now adjusted char */
+ /* no adjustment needed, all fine */
+ } else {
+ /* advance pointers */
+ best_cb += 32;
+ charset += 8;
+
+ /* remember colorram value */
+ colrammap[charpos] = (highdiff > 0);
+ }
+ }
+}
+
+static av_cold int a64multi_close_encoder(AVCodecContext *avctx)
+{
+ A64Context *c = avctx->priv_data;
+ av_free(c->mc_meta_charset);
+ av_free(c->mc_best_cb);
+ av_free(c->mc_charset);
+ av_free(c->mc_charmap);
+ av_free(c->mc_colram);
+ return 0;
+}
+
+static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
+{
+ A64Context *c = avctx->priv_data;
+ int a;
+ av_lfg_init(&c->randctx, 1);
+
+ if (avctx->global_quality < 1) {
+ c->mc_lifetime = 4;
+ } else {
+ c->mc_lifetime = avctx->global_quality /= FF_QP2LAMBDA;
+ }
+
+ av_log(avctx, AV_LOG_INFO, "charset lifetime set to %d frame(s)\n", c->mc_lifetime);
+
+ c->mc_frame_counter = 0;
+ c->mc_use_5col = avctx->codec->id == CODEC_ID_A64_MULTI5;
+ c->mc_pal_size = 4 + c->mc_use_5col;
+
+ /* precalc luma values for later use */
+ for (a = 0; a < c->mc_pal_size; a++) {
+ c->mc_luma_vals[a]=a64_palette[mc_colors[a]][0] * 0.30 +
+ a64_palette[mc_colors[a]][1] * 0.59 +
+ a64_palette[mc_colors[a]][2] * 0.11;
+ }
+
+ if (!(c->mc_meta_charset = av_malloc(32000 * c->mc_lifetime * sizeof(int))) ||
+ !(c->mc_best_cb = av_malloc(CHARSET_CHARS * 32 * sizeof(int))) ||
+ !(c->mc_charmap = av_mallocz(1000 * c->mc_lifetime * sizeof(int))) ||
+ !(c->mc_colram = av_mallocz(CHARSET_CHARS * sizeof(uint8_t))) ||
+ !(c->mc_charset = av_malloc(0x800 * (INTERLACED+1) * sizeof(uint8_t)))) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate buffer memory.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ /* set up extradata */
+ if (!(avctx->extradata = av_mallocz(8 * 4 + FF_INPUT_BUFFER_PADDING_SIZE))) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for extradata.\n");
+ return AVERROR(ENOMEM);
+ }
+ avctx->extradata_size = 8 * 4;
+ AV_WB32(avctx->extradata, c->mc_lifetime);
+ AV_WB32(avctx->extradata + 16, INTERLACED);
+
+ avcodec_get_frame_defaults(&c->picture);
+ avctx->coded_frame = &c->picture;
+ avctx->coded_frame->pict_type = FF_I_TYPE;
+ avctx->coded_frame->key_frame = 1;
+ if (!avctx->codec_tag)
+ avctx->codec_tag = AV_RL32("a64m");
+
+ return 0;
+}
+
+static void a64_compress_colram(unsigned char *buf, int *charmap, uint8_t *colram)
+{
+ int a;
+ uint8_t temp;
+ /* only needs to be done in 5col mode */
+ /* XXX could be squeezed to 0x80 bytes */
+ for (a = 0; a < 256; a++) {
+ temp = colram[charmap[a + 0x000]] << 0;
+ temp |= colram[charmap[a + 0x100]] << 1;
+ temp |= colram[charmap[a + 0x200]] << 2;
+ if (a < 0xe8) temp |= colram[charmap[a + 0x300]] << 3;
+ buf[a] = temp << 2;
+ }
+}
+
+static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
+ int buf_size, void *data)
+{
+ A64Context *c = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame *const p = (AVFrame *) & c->picture;
+
+ int frame;
+ int x, y;
+ int b_height;
+ int b_width;
+
+ int req_size;
+ int num_frames = c->mc_lifetime;
+
+ int *charmap = c->mc_charmap;
+ uint8_t *colram = c->mc_colram;
+ uint8_t *charset = c->mc_charset;
+ int *meta = c->mc_meta_charset;
+ int *best_cb = c->mc_best_cb;
+
+ int charset_size = 0x800 * (INTERLACED + 1);
+ int colram_size = 0x100 * c->mc_use_5col;
+ int screen_size;
+
+ if(CROP_SCREENS) {
+ b_height = FFMIN(avctx->height,C64YRES) >> 3;
+ b_width = FFMIN(avctx->width ,C64XRES) >> 3;
+ screen_size = b_width * b_height;
+ } else {
+ b_height = C64YRES >> 3;
+ b_width = C64XRES >> 3;
+ screen_size = 0x400;
+ }
+
+ /* no data, means end encoding asap */
+ if (!data) {
+ /* all done, end encoding */
+ if (!c->mc_lifetime) return 0;
+ /* no more frames in queue, prepare to flush remaining frames */
+ if (!c->mc_frame_counter) {
+ num_frames = c->mc_lifetime;
+ c->mc_lifetime = 0;
+ }
+ /* still frames in queue so limit lifetime to remaining frames */
+ else c->mc_lifetime = c->mc_frame_counter;
+ /* still new data available */
+ } else {
+ /* fill up mc_meta_charset with data until lifetime exceeds */
+ if (c->mc_frame_counter < c->mc_lifetime) {
+ *p = *pict;
+ p->pict_type = FF_I_TYPE;
+ p->key_frame = 1;
+ to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
+ c->mc_frame_counter++;
+ /* lifetime is not reached so wait for next frame first */
+ return 0;
+ }
+ }
+
+ /* lifetime reached so now convert X frames at once */
+ if (c->mc_frame_counter == c->mc_lifetime) {
+ req_size = 0;
+ /* any frames to encode? */
+ if (c->mc_lifetime) {
+ /* calc optimal new charset + charmaps */
+ ff_init_elbg(meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
+ ff_do_elbg (meta, 32, 1000 * c->mc_lifetime, best_cb, CHARSET_CHARS, 50, charmap, &c->randctx);
+
+ /* create colorram map and a c64 readable charset */
+ render_charset(avctx, charset, colram);
+
+ /* copy charset to buf */
+ memcpy(buf,charset, charset_size);
+
+ /* advance pointers */
+ buf += charset_size;
+ charset += charset_size;
+ req_size += charset_size;
+ }
+ /* no charset so clean buf */
+ else memset(buf, 0, charset_size);
+
+ /* write x frames to buf */
+ for (frame = 0; frame < c->mc_lifetime; frame++) {
+ /* copy charmap to buf. buf is uchar*, charmap is int*, so no memcpy here, sorry */
+ for (y = 0; y < b_height; y++) {
+ for (x = 0; x < b_width; x++) {
+ buf[y * b_width + x] = charmap[y * b_width + x];
+ }
+ }
+ /* advance pointers */
+ buf += screen_size;
+ req_size += screen_size;
+
+ /* compress and copy colram to buf */
+ if (c->mc_use_5col) {
+ a64_compress_colram(buf, charmap, colram);
+ /* advance pointers */
+ buf += colram_size;
+ req_size += colram_size;
+ }
+
+ /* advance to next charmap */
+ charmap += 1000;
+ }
+
+ AV_WB32(avctx->extradata + 4, c->mc_frame_counter);
+ AV_WB32(avctx->extradata + 8, charset_size);
+ AV_WB32(avctx->extradata + 12, screen_size + colram_size);
+
+ /* reset counter */
+ c->mc_frame_counter = 0;
+
+ if (req_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "buf size too small (need %d, got %d)\n", req_size, buf_size);
+ return -1;
+ }
+ return req_size;
+ }
+ return 0;
+}
+
+AVCodec ff_a64multi_encoder = {
+ .name = "a64multi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_A64_MULTI,
+ .priv_data_size = sizeof(A64Context),
+ .init = a64multi_init_encoder,
+ .encode = a64multi_encode_frame,
+ .close = a64multi_close_encoder,
+ .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
+ .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64"),
+ .capabilities = CODEC_CAP_DELAY,
+};
+
+AVCodec ff_a64multi5_encoder = {
+ .name = "a64multi5",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_A64_MULTI5,
+ .priv_data_size = sizeof(A64Context),
+ .init = a64multi_init_encoder,
+ .encode = a64multi_encode_frame,
+ .close = a64multi_close_encoder,
+ .pix_fmts = (const enum PixelFormat[]) {PIX_FMT_GRAY8, PIX_FMT_NONE},
+ .long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
+ .capabilities = CODEC_CAP_DELAY,
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/a64tables.h b/samples/rtsp_player/ffmpeg/libavcodec/a64tables.h
new file mode 100755
index 0000000..a955ef4
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/a64tables.h
@@ -0,0 +1,150 @@
+/*
+ * a64 video encoder - tables used by a64 encoders
+ * Copyright (c) 2009 Tobias Bindhammer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a64 video encoder - tables used by a64 encoders
+ */
+
+#ifndef AVCODEC_A64TABLES_H
+#define AVCODEC_A64TABLES_H
+
+#include <stdint.h>
+
+/**
+ * dither patterns used for rendering the multicolor charset
+ */
+
+static const uint8_t multi_dither_patterns[9][4][4] = {
+ {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+ },
+ {
+ {1, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 1, 0},
+ {0, 0, 0, 0}
+ },
+ {
+ {1, 0, 0, 0},
+ {0, 0, 1, 0},
+ {0, 1, 0, 0},
+ {0, 0, 0, 1}
+ },
+ {
+ {1, 0, 0, 0},
+ {0, 1, 0, 1},
+ {0, 0, 1, 0},
+ {0, 1, 0, 1}
+ },
+ {
+ {1, 0, 1, 0},
+ {0, 1, 0, 1},
+ {1, 0, 1, 0},
+ {0, 1, 0, 1}
+ },
+ {
+ {1, 1, 1, 0},
+ {0, 1, 0, 1},
+ {1, 0, 1, 1},
+ {0, 1, 0, 1}
+ },
+ {
+ {0, 1, 1, 1},
+ {1, 1, 0, 1},
+ {1, 0, 1, 1},
+ {1, 1, 1, 0}
+ },
+ {
+ {0, 1, 1, 1},
+ {1, 1, 1, 1},
+ {1, 1, 0, 1},
+ {1, 1, 1, 1}
+ },
+ {
+ {1, 1, 1, 1},
+ {1, 1, 1, 1},
+ {1, 1, 1, 1},
+ {1, 1, 1, 1}
+ },
+};
+
+static const uint8_t interlaced_dither_patterns[9][8][4] = {
+ {
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ },
+ {
+ {1, 0, 1, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ {1, 0, 1, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 0, 0, 0},
+ },
+ {
+ {1, 0, 1, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 1, 0, 1},
+ {1, 0, 1, 0}, {0, 0, 0, 0},
+ {0, 0, 0, 0}, {0, 1, 0, 1},
+ },
+ {
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {0, 1, 0, 1}, {0, 0, 0, 0},
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {0, 1, 0, 1}, {0, 0, 0, 0},
+ },
+ {
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {0, 1, 0, 1}, {1, 0, 1, 0},
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {0, 1, 0, 1}, {1, 0, 1, 0},
+ },
+ {
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {1, 1, 1, 1}, {1, 0, 1, 0},
+ {1, 0, 1, 0}, {0, 1, 0, 1},
+ {1, 1, 1, 1}, {1, 0, 1, 0},
+ },
+ {
+ {1, 0, 1, 0}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {0, 1, 0, 1},
+ {1, 0, 1, 0}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {0, 1, 0, 1},
+ },
+ {
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {0, 1, 0, 1},
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {0, 1, 0, 1},
+ },
+ {
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ {1, 1, 1, 1}, {1, 1, 1, 1},
+ }
+};
+
+#endif /* AVCODEC_A64TABLES_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac.h b/samples/rtsp_player/ffmpeg/libavcodec/aac.h
new file mode 100755
index 0000000..73bc408
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac.h
@@ -0,0 +1,304 @@
+/*
+ * AAC definitions and structures
+ * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
+ * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC definitions and structures
+ * @author Oded Shimon ( ods15 ods15 dyndns org )
+ * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
+ */
+
+#ifndef AVCODEC_AAC_H
+#define AVCODEC_AAC_H
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "fft.h"
+#include "mpeg4audio.h"
+#include "sbr.h"
+#include "fmtconvert.h"
+
+#include <stdint.h>
+
+#define MAX_CHANNELS 64
+#define MAX_ELEM_ID 16
+
+#define TNS_MAX_ORDER 20
+#define MAX_LTP_LONG_SFB 40
+
+enum RawDataBlockType {
+ TYPE_SCE,
+ TYPE_CPE,
+ TYPE_CCE,
+ TYPE_LFE,
+ TYPE_DSE,
+ TYPE_PCE,
+ TYPE_FIL,
+ TYPE_END,
+};
+
+enum ExtensionPayloadID {
+ EXT_FILL,
+ EXT_FILL_DATA,
+ EXT_DATA_ELEMENT,
+ EXT_DYNAMIC_RANGE = 0xb,
+ EXT_SBR_DATA = 0xd,
+ EXT_SBR_DATA_CRC = 0xe,
+};
+
+enum WindowSequence {
+ ONLY_LONG_SEQUENCE,
+ LONG_START_SEQUENCE,
+ EIGHT_SHORT_SEQUENCE,
+ LONG_STOP_SEQUENCE,
+};
+
+enum BandType {
+ ZERO_BT = 0, ///< Scalefactors and spectral data are all zero.
+ FIRST_PAIR_BT = 5, ///< This and later band types encode two values (rather than four) with one code word.
+ ESC_BT = 11, ///< Spectral data are coded with an escape sequence.
+ NOISE_BT = 13, ///< Spectral data are scaled white noise not coded in the bitstream.
+ INTENSITY_BT2 = 14, ///< Scalefactor data are intensity stereo positions.
+ INTENSITY_BT = 15, ///< Scalefactor data are intensity stereo positions.
+};
+
+#define IS_CODEBOOK_UNSIGNED(x) ((x - 1) & 10)
+
+enum ChannelPosition {
+ AAC_CHANNEL_FRONT = 1,
+ AAC_CHANNEL_SIDE = 2,
+ AAC_CHANNEL_BACK = 3,
+ AAC_CHANNEL_LFE = 4,
+ AAC_CHANNEL_CC = 5,
+};
+
+/**
+ * The point during decoding at which channel coupling is applied.
+ */
+enum CouplingPoint {
+ BEFORE_TNS,
+ BETWEEN_TNS_AND_IMDCT,
+ AFTER_IMDCT = 3,
+};
+
+/**
+ * Output configuration status
+ */
+enum OCStatus {
+    OC_NONE,        ///< Output unconfigured
+    OC_TRIAL_PCE,   ///< Output configuration under trial specified by an inband PCE
+    OC_TRIAL_FRAME, ///< Output configuration under trial specified by a frame header
+    OC_GLOBAL_HDR,  ///< Output configuration set in a global header but not yet locked
+    OC_LOCKED,      ///< Output configuration locked in place
+};
+
+/**
+ * Predictor State
+ */
+typedef struct {
+ float cor0;
+ float cor1;
+ float var0;
+ float var1;
+ float r0;
+ float r1;
+} PredictorState;
+
+#define MAX_PREDICTORS 672
+
+#define SCALE_DIV_512 36 ///< scalefactor difference that corresponds to scale difference in 512 times
+#define SCALE_ONE_POS 140 ///< scalefactor index that corresponds to scale=1.0
+#define SCALE_MAX_POS 255 ///< scalefactor index maximum value
+#define SCALE_MAX_DIFF 60 ///< maximum scalefactor difference allowed by standard
+#define SCALE_DIFF_ZERO 60 ///< codebook index corresponding to zero scalefactor indices difference
+
+/**
+ * Long Term Prediction
+ */
+typedef struct {
+ int8_t present;
+ int16_t lag;
+ float coef;
+ int8_t used[MAX_LTP_LONG_SFB];
+} LongTermPrediction;
+
+/**
+ * Individual Channel Stream
+ */
+typedef struct {
+ uint8_t max_sfb; ///< number of scalefactor bands per group
+ enum WindowSequence window_sequence[2];
+ uint8_t use_kb_window[2]; ///< If set, use Kaiser-Bessel window, otherwise use a sinus window.
+ int num_window_groups;
+ uint8_t group_len[8];
+ LongTermPrediction ltp;
+ const uint16_t *swb_offset; ///< table of offsets to the lowest spectral coefficient of a scalefactor band, sfb, for a particular window
+ const uint8_t *swb_sizes; ///< table of scalefactor band sizes for a particular window
+ int num_swb; ///< number of scalefactor window bands
+ int num_windows;
+ int tns_max_bands;
+ int predictor_present;
+ int predictor_initialized;
+ int predictor_reset_group;
+ uint8_t prediction_used[41];
+} IndividualChannelStream;
+
+/**
+ * Temporal Noise Shaping
+ */
+typedef struct {
+ int present;
+ int n_filt[8];
+ int length[8][4];
+ int direction[8][4];
+ int order[8][4];
+ float coef[8][4][TNS_MAX_ORDER];
+} TemporalNoiseShaping;
+
+/**
+ * Dynamic Range Control - decoded from the bitstream but not processed further.
+ */
+typedef struct {
+ int pce_instance_tag; ///< Indicates with which program the DRC info is associated.
+ int dyn_rng_sgn[17]; ///< DRC sign information; 0 - positive, 1 - negative
+ int dyn_rng_ctl[17]; ///< DRC magnitude information
+ int exclude_mask[MAX_CHANNELS]; ///< Channels to be excluded from DRC processing.
+ int band_incr; ///< Number of DRC bands greater than 1 having DRC info.
+ int interpolation_scheme; ///< Indicates the interpolation scheme used in the SBR QMF domain.
+ int band_top[17]; ///< Indicates the top of the i-th DRC band in units of 4 spectral lines.
+ int prog_ref_level; /**< A reference level for the long-term program audio level for all
+ * channels combined.
+ */
+} DynamicRangeControl;
+
+typedef struct {
+ int num_pulse;
+ int start;
+ int pos[4];
+ int amp[4];
+} Pulse;
+
+/**
+ * coupling parameters
+ */
+typedef struct {
+ enum CouplingPoint coupling_point; ///< The point during decoding at which coupling is applied.
+ int num_coupled; ///< number of target elements
+ enum RawDataBlockType type[8]; ///< Type of channel element to be coupled - SCE or CPE.
+ int id_select[8]; ///< element id
+ int ch_select[8]; /**< [0] shared list of gains; [1] list of gains for right channel;
+ * [2] list of gains for left channel; [3] lists of gains for both channels
+ */
+ float gain[16][120];
+} ChannelCoupling;
+
+/**
+ * Single Channel Element - used for both SCE and LFE elements.
+ */
+typedef struct {
+ IndividualChannelStream ics;
+ TemporalNoiseShaping tns;
+ Pulse pulse;
+ enum BandType band_type[128]; ///< band types
+ int band_type_run_end[120]; ///< band type run end points
+ float sf[120]; ///< scalefactors
+ int sf_idx[128]; ///< scalefactor indices (used by encoder)
+ uint8_t zeroes[128]; ///< band is not coded (used by encoder)
+ DECLARE_ALIGNED(16, float, coeffs)[1024]; ///< coefficients for IMDCT
+ DECLARE_ALIGNED(16, float, saved)[1024]; ///< overlap
+ DECLARE_ALIGNED(16, float, ret)[2048]; ///< PCM output
+ DECLARE_ALIGNED(16, int16_t, ltp_state)[3072]; ///< time signal for LTP
+ PredictorState predictor_state[MAX_PREDICTORS];
+} SingleChannelElement;
+
+/**
+ * channel element - generic struct for SCE/CPE/CCE/LFE
+ */
+typedef struct {
+ // CPE specific
+ int common_window; ///< Set if channels share a common 'IndividualChannelStream' in bitstream.
+ int ms_mode; ///< Signals mid/side stereo flags coding mode (used by encoder)
+ uint8_t ms_mask[128]; ///< Set if mid/side stereo is used for each scalefactor window band
+ // shared
+ SingleChannelElement ch[2];
+ // CCE specific
+ ChannelCoupling coup;
+ SpectralBandReplication sbr;
+} ChannelElement;
+
+/**
+ * main AAC context
+ */
+typedef struct {
+ AVCodecContext *avctx;
+
+ MPEG4AudioConfig m4ac;
+
+ int is_saved; ///< Set if elements have stored overlap from previous frame.
+ DynamicRangeControl che_drc;
+
+ /**
+ * @defgroup elements Channel element related data.
+ * @{
+ */
+ enum ChannelPosition che_pos[4][MAX_ELEM_ID]; /**< channel element channel mapping with the
+ * first index as the first 4 raw data block types
+ */
+ ChannelElement *che[4][MAX_ELEM_ID];
+ ChannelElement *tag_che_map[4][MAX_ELEM_ID];
+ int tags_mapped;
+ /** @} */
+
+ /**
+ * @defgroup temporary aligned temporary buffers (We do not want to have these on the stack.)
+ * @{
+ */
+ DECLARE_ALIGNED(16, float, buf_mdct)[1024];
+ /** @} */
+
+ /**
+ * @defgroup tables Computed / set up during initialization.
+ * @{
+ */
+ FFTContext mdct;
+ FFTContext mdct_small;
+ FFTContext mdct_ltp;
+ DSPContext dsp;
+ FmtConvertContext fmt_conv;
+ int random_state;
+ /** @} */
+
+ /**
+ * @defgroup output Members used for output interleaving.
+ * @{
+ */
+ float *output_data[MAX_CHANNELS]; ///< Points to each element's 'ret' buffer (PCM output).
+ float sf_scale; ///< Pre-scale for correct IMDCT and dsp.float_to_int16.
+ int sf_offset; ///< offset into pow2sf_tab as appropriate for dsp.float_to_int16
+ /** @} */
+
+ DECLARE_ALIGNED(16, float, temp)[128];
+
+ enum OCStatus output_configured;
+} AACContext;
+
+#endif /* AVCODEC_AAC_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.c b/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.c
new file mode 100755
index 0000000..6f6ed89
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.c
@@ -0,0 +1,103 @@
+/*
+ * Common AAC and AC-3 parser
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "aac_ac3_parser.h"
+
+int ff_aac_ac3_parse(AVCodecParserContext *s1,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ AACAC3ParseContext *s = s1->priv_data;
+ ParseContext *pc = &s->pc;
+ int len, i;
+ int new_frame_start;
+
+get_next:
+ i=END_NOT_FOUND;
+ if(s->remaining_size <= buf_size){
+ if(s->remaining_size && !s->need_next_header){
+ i= s->remaining_size;
+ s->remaining_size = 0;
+ }else{ //we need a header first
+ len=0;
+ for(i=s->remaining_size; i<buf_size; i++){
+ s->state = (s->state<<8) + buf[i];
+ if((len=s->sync(s->state, s, &s->need_next_header, &new_frame_start)))
+ break;
+ }
+ if(len<=0){
+ i=END_NOT_FOUND;
+ }else{
+ s->state=0;
+ i-= s->header_size -1;
+ s->remaining_size = len;
+ if(!new_frame_start || pc->index+i<=0){
+ s->remaining_size += i;
+ goto get_next;
+ }
+ }
+ }
+ }
+
+ if(ff_combine_frame(pc, i, &buf, &buf_size)<0){
+ s->remaining_size -= FFMIN(s->remaining_size, buf_size);
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+
+ *poutbuf = buf;
+ *poutbuf_size = buf_size;
+
+ /* update codec info */
+ if(s->codec_id)
+ avctx->codec_id = s->codec_id;
+
+ /* Due to backwards compatible HE-AAC the sample rate, channel count,
+ and total number of samples found in an AAC ADTS header are not
+ reliable. Bit rate is still accurate because the total frame duration in
+ seconds is still correct (as is the number of bits in the frame). */
+ if (avctx->codec_id != CODEC_ID_AAC) {
+ avctx->sample_rate = s->sample_rate;
+
+ /* allow downmixing to stereo (or mono for AC-3) */
+ if(avctx->request_channels > 0 &&
+ avctx->request_channels < s->channels &&
+ (avctx->request_channels <= 2 ||
+ (avctx->request_channels == 1 &&
+ (avctx->codec_id == CODEC_ID_AC3 ||
+ avctx->codec_id == CODEC_ID_EAC3)))) {
+ avctx->channels = avctx->request_channels;
+ } else {
+ avctx->channels = s->channels;
+ avctx->channel_layout = s->channel_layout;
+ }
+ avctx->frame_size = s->samples;
+ avctx->audio_service_type = s->service_type;
+ }
+
+ avctx->bit_rate = s->bit_rate;
+
+ return i;
+}
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.h b/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.h
new file mode 100755
index 0000000..ccc387d
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_ac3_parser.h
@@ -0,0 +1,66 @@
+/*
+ * Common AAC and AC-3 parser prototypes
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AAC_AC3_PARSER_H
+#define AVCODEC_AAC_AC3_PARSER_H
+
+#include <stdint.h>
+#include "avcodec.h"
+#include "parser.h"
+
+typedef enum {
+ AAC_AC3_PARSE_ERROR_SYNC = -1,
+ AAC_AC3_PARSE_ERROR_BSID = -2,
+ AAC_AC3_PARSE_ERROR_SAMPLE_RATE = -3,
+ AAC_AC3_PARSE_ERROR_FRAME_SIZE = -4,
+ AAC_AC3_PARSE_ERROR_FRAME_TYPE = -5,
+ AAC_AC3_PARSE_ERROR_CRC = -6,
+ AAC_AC3_PARSE_ERROR_CHANNEL_CFG = -7,
+} AACAC3ParseError;
+
+typedef struct AACAC3ParseContext {
+ ParseContext pc;
+ int frame_size;
+ int header_size;
+ int (*sync)(uint64_t state, struct AACAC3ParseContext *hdr_info,
+ int *need_next_header, int *new_frame_start);
+
+ int channels;
+ int sample_rate;
+ int bit_rate;
+ int samples;
+ int64_t channel_layout;
+ int service_type;
+
+ int remaining_size;
+ uint64_t state;
+
+ int need_next_header;
+ enum CodecID codec_id;
+} AACAC3ParseContext;
+
+int ff_aac_ac3_parse(AVCodecParserContext *s1,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+
+#endif /* AVCODEC_AAC_AC3_PARSER_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_adtstoasc_bsf.c b/samples/rtsp_player/ffmpeg/libavcodec/aac_adtstoasc_bsf.c
new file mode 100755
index 0000000..6558c02
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_adtstoasc_bsf.c
@@ -0,0 +1,114 @@
+/*
+ * MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration bitstream filter
+ * Copyright (c) 2009 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "aacadtsdec.h"
+#include "put_bits.h"
+#include "get_bits.h"
+#include "mpeg4audio.h"
+#include "internal.h"
+
+typedef struct AACBSFContext {
+ int first_frame_done;
+} AACBSFContext;
+
+/**
+ * This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
+ * ADTS header and removes the ADTS header.
+ */
+static int aac_adtstoasc_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int keyframe)
+{
+ GetBitContext gb;
+ PutBitContext pb;
+ AACADTSHeaderInfo hdr;
+
+ AACBSFContext *ctx = bsfc->priv_data;
+
+ init_get_bits(&gb, buf, AAC_ADTS_HEADER_SIZE*8);
+
+ *poutbuf = (uint8_t*) buf;
+ *poutbuf_size = buf_size;
+
+ if (avctx->extradata)
+ if (show_bits(&gb, 12) != 0xfff)
+ return 0;
+
+ if (ff_aac_parse_header(&gb, &hdr) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error parsing ADTS frame header!\n");
+ return -1;
+ }
+
+ if (!hdr.crc_absent && hdr.num_aac_frames > 1) {
+ av_log_missing_feature(avctx, "Multiple RDBs per frame with CRC is", 0);
+ return -1;
+ }
+
+ buf += AAC_ADTS_HEADER_SIZE + 2*!hdr.crc_absent;
+ buf_size -= AAC_ADTS_HEADER_SIZE + 2*!hdr.crc_absent;
+
+ if (!ctx->first_frame_done) {
+ int pce_size = 0;
+ uint8_t pce_data[MAX_PCE_SIZE];
+ if (!hdr.chan_config) {
+ init_get_bits(&gb, buf, buf_size);
+ if (get_bits(&gb, 3) != 5) {
+ av_log_missing_feature(avctx, "PCE based channel configuration, where the PCE is not the first syntax element is", 0);
+ return -1;
+ }
+ init_put_bits(&pb, pce_data, MAX_PCE_SIZE);
+ pce_size = ff_copy_pce_data(&pb, &gb)/8;
+ flush_put_bits(&pb);
+ buf_size -= get_bits_count(&gb)/8;
+ buf += get_bits_count(&gb)/8;
+ }
+ avctx->extradata_size = 2 + pce_size;
+ avctx->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ init_put_bits(&pb, avctx->extradata, avctx->extradata_size);
+ put_bits(&pb, 5, hdr.object_type);
+ put_bits(&pb, 4, hdr.sampling_index);
+ put_bits(&pb, 4, hdr.chan_config);
+ put_bits(&pb, 1, 0); //frame length - 1024 samples
+ put_bits(&pb, 1, 0); //does not depend on core coder
+ put_bits(&pb, 1, 0); //is not extension
+ flush_put_bits(&pb);
+ if (pce_size) {
+ memcpy(avctx->extradata + 2, pce_data, pce_size);
+ }
+
+ ctx->first_frame_done = 1;
+ }
+
+ *poutbuf = (uint8_t*) buf;
+ *poutbuf_size = buf_size;
+
+ return 0;
+}
+
+AVBitStreamFilter ff_aac_adtstoasc_bsf = {
+ "aac_adtstoasc",
+ sizeof(AACBSFContext),
+ aac_adtstoasc_filter,
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_parser.c b/samples/rtsp_player/ffmpeg/libavcodec/aac_parser.c
new file mode 100755
index 0000000..9165178
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_parser.c
@@ -0,0 +1,69 @@
+/*
+ * Audio and Video frame extraction
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "parser.h"
+#include "aac_ac3_parser.h"
+#include "aacadtsdec.h"
+#include "get_bits.h"
+#include "mpeg4audio.h"
+
+static int aac_sync(uint64_t state, AACAC3ParseContext *hdr_info,
+ int *need_next_header, int *new_frame_start)
+{
+ GetBitContext bits;
+ AACADTSHeaderInfo hdr;
+ int size;
+ union {
+ uint64_t u64;
+ uint8_t u8[8];
+ } tmp;
+
+ tmp.u64 = av_be2ne64(state);
+ init_get_bits(&bits, tmp.u8+8-AAC_ADTS_HEADER_SIZE, AAC_ADTS_HEADER_SIZE * 8);
+
+ if ((size = ff_aac_parse_header(&bits, &hdr)) < 0)
+ return 0;
+ *need_next_header = 0;
+ *new_frame_start = 1;
+ hdr_info->sample_rate = hdr.sample_rate;
+ hdr_info->channels = ff_mpeg4audio_channels[hdr.chan_config];
+ hdr_info->samples = hdr.samples;
+ hdr_info->bit_rate = hdr.bit_rate;
+ return size;
+}
+
+static av_cold int aac_parse_init(AVCodecParserContext *s1)
+{
+ AACAC3ParseContext *s = s1->priv_data;
+ s->header_size = AAC_ADTS_HEADER_SIZE;
+ s->sync = aac_sync;
+ return 0;
+}
+
+
+AVCodecParser ff_aac_parser = {
+ { CODEC_ID_AAC },
+ sizeof(AACAC3ParseContext),
+ aac_parse_init,
+ ff_aac_ac3_parse,
+ ff_parse_close,
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.c b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.c
new file mode 100755
index 0000000..33a179f
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.c
@@ -0,0 +1,37 @@
+/*
+ * Generate a header file for hardcoded AAC tables
+ *
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#define CONFIG_HARDCODED_TABLES 0
+#include "aac_tablegen.h"
+#include "tableprint.h"
+
+int main(void)
+{
+ ff_aac_tableinit();
+
+ write_fileheader();
+
+ WRITE_ARRAY("const", float, ff_aac_pow2sf_tab);
+
+ return 0;
+}
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.h b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.h
new file mode 100755
index 0000000..3a820ba
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen.h
@@ -0,0 +1,42 @@
+/*
+ * Header file for hardcoded AAC tables
+ *
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AAC_TABLEGEN_H
+#define AAC_TABLEGEN_H
+
+#include "aac_tablegen_decl.h"
+
+#if CONFIG_HARDCODED_TABLES
+#include "libavcodec/aac_tables.h"
+#else
+#include "libavutil/mathematics.h"
+float ff_aac_pow2sf_tab[428];
+
+void ff_aac_tableinit(void)
+{
+ int i;
+ for (i = 0; i < 428; i++)
+ ff_aac_pow2sf_tab[i] = pow(2, (i - 200) / 4.);
+}
+#endif /* CONFIG_HARDCODED_TABLES */
+
+#endif /* AAC_TABLEGEN_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen_decl.h b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen_decl.h
new file mode 100755
index 0000000..9a90a09
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aac_tablegen_decl.h
@@ -0,0 +1,34 @@
+/*
+ * Header file for hardcoded AAC tables
+ *
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AAC_TABLEGEN_DECL_H
+#define AAC_TABLEGEN_DECL_H
+
+#if CONFIG_HARDCODED_TABLES
+#define ff_aac_tableinit()
+extern const float ff_aac_pow2sf_tab[428];
+#else
+void ff_aac_tableinit(void);
+extern float ff_aac_pow2sf_tab[428];
+#endif /* CONFIG_HARDCODED_TABLES */
+
+#endif /* AAC_TABLEGEN_DECL_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.c b/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.c
new file mode 100755
index 0000000..fd86d28
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.c
@@ -0,0 +1,70 @@
+/*
+ * Audio and Video frame extraction
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ * Copyright (c) 2009 Alex Converse
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "aac_ac3_parser.h"
+#include "aacadtsdec.h"
+#include "get_bits.h"
+#include "mpeg4audio.h"
+
+int ff_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr)
+{
+ int size, rdb, ch, sr;
+ int aot, crc_abs;
+
+ if(get_bits(gbc, 12) != 0xfff)
+ return AAC_AC3_PARSE_ERROR_SYNC;
+
+ skip_bits1(gbc); /* id */
+ skip_bits(gbc, 2); /* layer */
+ crc_abs = get_bits1(gbc); /* protection_absent */
+ aot = get_bits(gbc, 2); /* profile_objecttype */
+ sr = get_bits(gbc, 4); /* sample_frequency_index */
+ if(!ff_mpeg4audio_sample_rates[sr])
+ return AAC_AC3_PARSE_ERROR_SAMPLE_RATE;
+ skip_bits1(gbc); /* private_bit */
+ ch = get_bits(gbc, 3); /* channel_configuration */
+
+ skip_bits1(gbc); /* original/copy */
+ skip_bits1(gbc); /* home */
+
+ /* adts_variable_header */
+ skip_bits1(gbc); /* copyright_identification_bit */
+ skip_bits1(gbc); /* copyright_identification_start */
+ size = get_bits(gbc, 13); /* aac_frame_length */
+ if(size < AAC_ADTS_HEADER_SIZE)
+ return AAC_AC3_PARSE_ERROR_FRAME_SIZE;
+
+ skip_bits(gbc, 11); /* adts_buffer_fullness */
+ rdb = get_bits(gbc, 2); /* number_of_raw_data_blocks_in_frame */
+
+ hdr->object_type = aot + 1;
+ hdr->chan_config = ch;
+ hdr->crc_absent = crc_abs;
+ hdr->num_aac_frames = rdb + 1;
+ hdr->sampling_index = sr;
+ hdr->sample_rate = ff_mpeg4audio_sample_rates[sr];
+ hdr->samples = (rdb + 1) * 1024;
+ hdr->bit_rate = size * 8 * hdr->sample_rate / hdr->samples;
+
+ return size;
+}
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.h b/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.h
new file mode 100755
index 0000000..6e0a869
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacadtsdec.h
@@ -0,0 +1,54 @@
+/*
+ * AAC ADTS header decoding prototypes and structures
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AACADTSDEC_H
+#define AVCODEC_AACADTSDEC_H
+
+#include <stdint.h>
+#include "get_bits.h"
+
+#define AAC_ADTS_HEADER_SIZE 7
+
+typedef struct {
+ uint32_t sample_rate;
+ uint32_t samples;
+ uint32_t bit_rate;
+ uint8_t crc_absent;
+ uint8_t object_type;
+ uint8_t sampling_index;
+ uint8_t chan_config;
+ uint8_t num_aac_frames;
+} AACADTSHeaderInfo;
+
+/**
+ * Parse AAC frame header.
+ * Parse the ADTS frame header to the end of the variable header, which is
+ * the first 56 bits.
+ * @param[in]  gbc BitContext containing the first 56 bits of the frame.
+ * @param[out] hdr Pointer to struct where header info is written.
+ * @return Returns the ADTS frame size (in bytes) on success, -1 if there is
+ *         a sync word mismatch, -3 if the sample rate element is invalid,
+ *         or -4 if the frame size element is smaller than the header size.
+ */
+int ff_aac_parse_header(GetBitContext *gbc, AACADTSHeaderInfo *hdr);
+
+#endif /* AVCODEC_AACADTSDEC_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aaccoder.c b/samples/rtsp_player/ffmpeg/libavcodec/aaccoder.c
new file mode 100755
index 0000000..9748fe1
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aaccoder.c
@@ -0,0 +1,1125 @@
+/*
+ * AAC coefficients encoder
+ * Copyright (C) 2008-2009 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC coefficients encoder
+ */
+
+/***********************************
+ * TODOs:
+ * speedup quantizer selection
+ * add sane pulse detection
+ ***********************************/
+
+#include <float.h>
+#include <math.h>
+#include "avcodec.h"
+#include "put_bits.h"
+#include "aac.h"
+#include "aacenc.h"
+#include "aactab.h"
+
+/** bits needed to code codebook run value for long windows */
+static const uint8_t run_value_bits_long[64] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 15
+};
+
+/** bits needed to code codebook run value for short windows */
+static const uint8_t run_value_bits_short[16] = {
+ 3, 3, 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 9
+};
+
+static const uint8_t *run_value_bits[2] = { // indexed by (num_windows == 8): 0 selects the long-window table, 1 the short-window table
+ run_value_bits_long, run_value_bits_short
+};
+
+
+/**
+ * Quantize one coefficient.
+ * @return absolute value of the quantized coefficient
+ * @see 3GPP TS26.403 5.6.2 "Scalefactor determination"
+ */
+static av_always_inline int quant(float coef, const float Q) // returns floor(|coef*Q|^(3/4) + 0.4054), the AAC quantized magnitude
+{
+ float a = coef * Q; // scale into the quantizer's domain
+ return sqrtf(a * sqrtf(a)) + 0.4054; // a^0.75 via sqrt(a*sqrt(a)); the 0.4054 rounding offset is from 3GPP TS26.403, result truncated to int
+}
+
+static void quantize_bands(int *out, const float *in, const float *scaled,
+ int size, float Q34, int is_signed, int maxval) // vector form of quant(): out[i] = min(scaled[i]*Q34 + 0.4054, maxval), sign taken from in[i]
+{
+ int i;
+ double qc;
+ for (i = 0; i < size; i++) {
+ qc = scaled[i] * Q34; // scaled[] already holds |in|^(3/4), Q34 = Q^(3/4), completing the quantization formula
+ out[i] = (int)FFMIN(qc + 0.4054, (double)maxval); // clamp to the codebook's largest representable magnitude
+ if (is_signed && in[i] < 0.0f) {
+ out[i] = -out[i]; // signed codebooks encode the sign in the index itself, so restore it here
+ }
+ }
+}
+
+static void abs_pow34_v(float *out, const float *in, const int size) // out[i] = |in[i]|^(3/4), the domain the AAC quantizer works in
+{
+#ifndef USE_REALLY_FULL_SEARCH
+ int i;
+ for (i = 0; i < size; i++) {
+ float a = fabsf(in[i]);
+ out[i] = sqrtf(a * sqrtf(a)); // a^0.75 computed with two sqrtfs, cheaper than powf
+ }
+#endif /* USE_REALLY_FULL_SEARCH */
+}
+
+static const uint8_t aac_cb_range [12] = {0, 3, 3, 3, 3, 9, 9, 8, 8, 13, 13, 17}; // per-codebook number of quantization levels per dimension (index radix)
+static const uint8_t aac_cb_maxval[12] = {0, 1, 1, 2, 2, 4, 4, 7, 7, 12, 12, 16}; // per-codebook largest absolute quantized value
+
+/**
+ * Calculate rate distortion cost for quantizing with given codebook
+ *
+ * @return quantization distortion
+ */
+static av_always_inline float quantize_and_encode_band_cost_template(
+ struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits, int BT_ZERO, int BT_UNSIGNED,
+ int BT_PAIR, int BT_ESC)
+{
+ const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512]; // inverse quantization step (dequant scale)
+ const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512]; // forward quantization step
+ const float CLIPPED_ESCAPE = 165140.0f*IQ; // largest value representable by the escape code (8191^(4/3) * IQ)
+ int i, j;
+ float cost = 0;
+ const int dim = BT_PAIR ? 2 : 4; // pair codebooks code 2 coefs per index, quad codebooks 4
+ int resbits = 0;
+ const float Q34 = sqrtf(Q * sqrtf(Q)); // Q^(3/4), matching the |x|^(3/4) domain of scaled[]
+ const int range = aac_cb_range[cb];
+ const int maxval = aac_cb_maxval[cb];
+ int off;
+
+ if (BT_ZERO) { // zero codebook: cost is pure distortion (all energy lost), zero bits
+ for (i = 0; i < size; i++)
+ cost += in[i]*in[i];
+ if (bits)
+ *bits = 0;
+ return cost * lambda;
+ }
+ if (!scaled) { // caller may pass NULL and let us compute |in|^(3/4) into the scratch buffer
+ abs_pow34_v(s->scoefs, in, size);
+ scaled = s->scoefs;
+ }
+ quantize_bands(s->qcoefs, in, scaled, size, Q34, !BT_UNSIGNED, maxval);
+ if (BT_UNSIGNED) {
+ off = 0;
+ } else {
+ off = maxval; // signed codebooks: bias quantized values into [0, 2*maxval] for index computation
+ }
+ for (i = 0; i < size; i += dim) { // walk the band dim coefficients at a time
+ const float *vec;
+ int *quants = s->qcoefs + i;
+ int curidx = 0;
+ int curbits;
+ float rd = 0.0f;
+ for (j = 0; j < dim; j++) { // build the codebook index in base `range`
+ curidx *= range;
+ curidx += quants[j] + off;
+ }
+ curbits = ff_aac_spectral_bits[cb-1][curidx];
+ vec = &ff_aac_codebook_vectors[cb-1][curidx*dim];
+ if (BT_UNSIGNED) {
+ for (j = 0; j < dim; j++) {
+ float t = fabsf(in[i+j]);
+ float di;
+ if (BT_ESC && vec[j] == 64.0f) { //FIXME: slow
+ if (t >= CLIPPED_ESCAPE) { // beyond what the 13-bit escape can represent
+ di = t - CLIPPED_ESCAPE;
+ curbits += 21; // maximum escape code length
+ } else {
+ int c = av_clip(quant(t, Q), 0, 8191);
+ di = t - c*cbrtf(c)*IQ; // dequant c^(4/3) as c*cbrt(c)
+ curbits += av_log2(c)*2 - 4 + 1; // escape prefix + mantissa bit count
+ }
+ } else {
+ di = t - vec[j]*IQ;
+ }
+ if (vec[j] != 0.0f)
+ curbits++; // one sign bit per nonzero unsigned-codebook value
+ rd += di*di;
+ }
+ } else {
+ for (j = 0; j < dim; j++) { // signed codebooks: vector already carries the sign
+ float di = in[i+j] - vec[j]*IQ;
+ rd += di*di;
+ }
+ }
+ cost += rd * lambda + curbits; // rate-distortion cost: lambda trades distortion against bits
+ resbits += curbits;
+ if (cost >= uplim) // early out once we exceed the caller's budget
+ return uplim;
+ if (pb) { // pb != NULL means actually emit the codes, not just measure
+ put_bits(pb, ff_aac_spectral_bits[cb-1][curidx], ff_aac_spectral_codes[cb-1][curidx]);
+ if (BT_UNSIGNED)
+ for (j = 0; j < dim; j++)
+ if (ff_aac_codebook_vectors[cb-1][curidx*dim+j] != 0.0f)
+ put_bits(pb, 1, in[i+j] < 0.0f); // sign bits follow the codeword
+ if (BT_ESC) {
+ for (j = 0; j < 2; j++) {
+ if (ff_aac_codebook_vectors[cb-1][curidx*2+j] == 64.0f) { // 64.0 marks an escape value in the ESC codebook
+ int coef = av_clip(quant(fabsf(in[i+j]), Q), 0, 8191);
+ int len = av_log2(coef);
+
+ put_bits(pb, len - 4 + 1, (1 << (len - 4 + 1)) - 2); // unary length prefix
+ put_bits(pb, len, coef & ((1 << len) - 1)); // coefficient without its leading 1 bit
+ }
+ }
+ }
+ }
+ }
+
+ if (bits)
+ *bits = resbits;
+ return cost;
+}
+
+#define QUANTIZE_AND_ENCODE_BAND_COST_FUNC(NAME, BT_ZERO, BT_UNSIGNED, BT_PAIR, BT_ESC) \
+static float quantize_and_encode_band_cost_ ## NAME( \
+ struct AACEncContext *s, \
+ PutBitContext *pb, const float *in, \
+ const float *scaled, int size, int scale_idx, \
+ int cb, const float lambda, const float uplim, \
+ int *bits) { \
+ return quantize_and_encode_band_cost_template( \
+ s, pb, in, scaled, size, scale_idx, \
+ BT_ESC ? ESC_BT : cb, lambda, uplim, bits, \
+ BT_ZERO, BT_UNSIGNED, BT_PAIR, BT_ESC); \
+}
+
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(ZERO, 1, 0, 0, 0) // zero book: no bits, full distortion
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(SQUAD, 0, 0, 0, 0) // signed quad (4 coefs per index)
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(UQUAD, 0, 1, 0, 0) // unsigned quad (sign bits sent separately)
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(SPAIR, 0, 0, 1, 0) // signed pair (2 coefs per index)
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(UPAIR, 0, 1, 1, 0) // unsigned pair
+QUANTIZE_AND_ENCODE_BAND_COST_FUNC(ESC, 0, 1, 1, 1) // escape book: unsigned pair with 13-bit escape extension
+
+static float (*const quantize_and_encode_band_cost_arr[])( // dispatch table indexed directly by codebook number 0..11
+ struct AACEncContext *s,
+ PutBitContext *pb, const float *in,
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits) = {
+ quantize_and_encode_band_cost_ZERO, // cb 0
+ quantize_and_encode_band_cost_SQUAD, // cb 1
+ quantize_and_encode_band_cost_SQUAD, // cb 2
+ quantize_and_encode_band_cost_UQUAD, // cb 3
+ quantize_and_encode_band_cost_UQUAD, // cb 4
+ quantize_and_encode_band_cost_SPAIR, // cb 5
+ quantize_and_encode_band_cost_SPAIR, // cb 6
+ quantize_and_encode_band_cost_UPAIR, // cb 7
+ quantize_and_encode_band_cost_UPAIR, // cb 8
+ quantize_and_encode_band_cost_UPAIR, // cb 9
+ quantize_and_encode_band_cost_UPAIR, // cb 10
+ quantize_and_encode_band_cost_ESC, // cb 11 (ESC_BT)
+};
+
+#define quantize_and_encode_band_cost( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits) \
+ quantize_and_encode_band_cost_arr[cb]( \
+ s, pb, in, scaled, size, scale_idx, cb, \
+ lambda, uplim, bits)
+
+static float quantize_band_cost(struct AACEncContext *s, const float *in, // measure-only wrapper: RD cost of coding `in` with codebook cb, no bits written
+ const float *scaled, int size, int scale_idx,
+ int cb, const float lambda, const float uplim,
+ int *bits)
+{
+ return quantize_and_encode_band_cost(s, NULL, in, scaled, size, scale_idx, // NULL PutBitContext suppresses actual encoding
+ cb, lambda, uplim, bits);
+}
+
+static void quantize_and_encode_band(struct AACEncContext *s, PutBitContext *pb, // encode-only wrapper: write the band's codes to pb, ignore the cost
+ const float *in, int size, int scale_idx,
+ int cb, const float lambda)
+{
+ quantize_and_encode_band_cost(s, pb, in, NULL, size, scale_idx, cb, lambda, // NULL scaled: let the cost function compute |in|^(3/4); INFINITY disables the early-out
+ INFINITY, NULL);
+}
+
+static float find_max_val(int group_len, int swb_size, const float *scaled) { // max of scaled[] over one scalefactor band across all windows of a group
+ float maxval = 0.0f;
+ int w2, i;
+ for (w2 = 0; w2 < group_len; w2++) {
+ for (i = 0; i < swb_size; i++) {
+ maxval = FFMAX(maxval, scaled[w2*128+i]); // windows are laid out 128 coefficients apart
+ }
+ }
+ return maxval;
+}
+
+static int find_min_book(float maxval, int sf) { // smallest codebook able to represent `maxval` (in |x|^(3/4) domain) at scalefactor sf
+ float Q = ff_aac_pow2sf_tab[200 - sf + SCALE_ONE_POS - SCALE_DIV_512];
+ float Q34 = sqrtf(Q * sqrtf(Q));
+ int qmaxval, cb;
+ qmaxval = maxval * Q34 + 0.4054f; // quantized magnitude of the largest coefficient
+ if (qmaxval == 0) cb = 0; // all-zero band
+ else if (qmaxval == 1) cb = 1; // thresholds follow aac_cb_maxval[]; picks the cheapest adequate book
+ else if (qmaxval == 2) cb = 3;
+ else if (qmaxval <= 4) cb = 5;
+ else if (qmaxval <= 7) cb = 7;
+ else if (qmaxval <= 12) cb = 9;
+ else cb = 11; // anything larger needs the escape codebook
+ return cb;
+}
+
+/**
+ * structure used in optimal codebook search
+ */
+typedef struct BandCodingPath {
+ int prev_idx; ///< pointer to the previous path point
+ float cost; ///< path cost
+ int run; ///< length of the codebook run ending at this point
+} BandCodingPath;
+
+/**
+ * Encode band info for single window group bands.
+ */
+static void encode_window_bands_info(AACEncContext *s, SingleChannelElement *sce, // Viterbi search over codebook choices per scalefactor band, then emit section data
+ int win, int group_len, const float lambda)
+{
+ BandCodingPath path[120][12]; // trellis: [band+1][codebook] best path ending there
+ int w, swb, cb, start, start2, size;
+ int i, j;
+ const int max_sfb = sce->ics.max_sfb;
+ const int run_bits = sce->ics.num_windows == 1 ? 5 : 3; // section length field width: 5 bits for long windows, 3 for short
+ const int run_esc = (1 << run_bits) - 1; // all-ones value escapes to another length field
+ int idx, ppos, count;
+ int stackrun[120], stackcb[120], stack_len;
+ float next_minrd = INFINITY;
+ int next_mincb = 0;
+
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+ start = win*128;
+ for (cb = 0; cb < 12; cb++) { // trellis start: all codebooks free
+ path[0][cb].cost = 0.0f;
+ path[0][cb].prev_idx = -1;
+ path[0][cb].run = 0;
+ }
+ for (swb = 0; swb < max_sfb; swb++) {
+ start2 = start;
+ size = sce->ics.swb_sizes[swb];
+ if (sce->zeroes[win*16 + swb]) { // zeroed band: every codebook just extends its run at no extra cost
+ for (cb = 0; cb < 12; cb++) {
+ path[swb+1][cb].prev_idx = cb;
+ path[swb+1][cb].cost = path[swb][cb].cost;
+ path[swb+1][cb].run = path[swb][cb].run + 1;
+ }
+ } else {
+ float minrd = next_minrd; // cheapest path into this band, computed during the previous iteration
+ int mincb = next_mincb;
+ next_minrd = INFINITY;
+ next_mincb = 0;
+ for (cb = 0; cb < 12; cb++) {
+ float cost_stay_here, cost_get_here;
+ float rd = 0.0f;
+ for (w = 0; w < group_len; w++) { // accumulate RD cost over all windows sharing this band
+ FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(win+w)*16+swb];
+ rd += quantize_band_cost(s, sce->coeffs + start + w*128,
+ s->scoefs + start + w*128, size,
+ sce->sf_idx[(win+w)*16+swb], cb,
+ lambda / band->threshold, INFINITY, NULL); // weight distortion by the psy-model threshold
+ }
+ cost_stay_here = path[swb][cb].cost + rd;
+ cost_get_here = minrd + rd + run_bits + 4; // switching codebooks costs a new section header (4-bit cb + run field)
+ if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
+ != run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
+ cost_stay_here += run_bits; // extending the run crosses an escape boundary, pay another length field
+ if (cost_get_here < cost_stay_here) {
+ path[swb+1][cb].prev_idx = mincb;
+ path[swb+1][cb].cost = cost_get_here;
+ path[swb+1][cb].run = 1;
+ } else {
+ path[swb+1][cb].prev_idx = cb;
+ path[swb+1][cb].cost = cost_stay_here;
+ path[swb+1][cb].run = path[swb][cb].run + 1;
+ }
+ if (path[swb+1][cb].cost < next_minrd) { // track the cheapest entry point for the next band
+ next_minrd = path[swb+1][cb].cost;
+ next_mincb = cb;
+ }
+ }
+ }
+ start += sce->ics.swb_sizes[swb];
+ }
+
+ //convert resulting path from backward-linked list
+ stack_len = 0;
+ idx = 0;
+ for (cb = 1; cb < 12; cb++) // pick the cheapest terminal state
+ if (path[max_sfb][cb].cost < path[max_sfb][idx].cost)
+ idx = cb;
+ ppos = max_sfb;
+ while (ppos > 0) { // walk back through the trellis, collecting (codebook, run) pairs
+ cb = idx;
+ stackrun[stack_len] = path[ppos][cb].run;
+ stackcb [stack_len] = cb;
+ idx = path[ppos-path[ppos][cb].run+1][cb].prev_idx;
+ ppos -= path[ppos][cb].run;
+ stack_len++;
+ }
+ //perform actual band info encoding
+ start = 0;
+ for (i = stack_len - 1; i >= 0; i--) { // stack is backwards, so emit in reverse
+ put_bits(&s->pb, 4, stackcb[i]);
+ count = stackrun[i];
+ memset(sce->zeroes + win*16 + start, !stackcb[i], count); // codebook 0 means the whole section is zeroed
+ //XXX: memset when band_type is also uint8_t
+ for (j = 0; j < count; j++) {
+ sce->band_type[win*16 + start] = stackcb[i];
+ start++;
+ }
+ while (count >= run_esc) { // section length escape coding
+ put_bits(&s->pb, run_bits, run_esc);
+ count -= run_esc;
+ }
+ put_bits(&s->pb, run_bits, count);
+ }
+}
+
+static void codebook_trellis_rate(AACEncContext *s, SingleChannelElement *sce, // rate-only variant of encode_window_bands_info: codebooks already lower-bounded by band_type
+ int win, int group_len, const float lambda)
+{
+ BandCodingPath path[120][12]; // trellis: [band+1][codebook] best path ending there
+ int w, swb, cb, start, start2, size;
+ int i, j;
+ const int max_sfb = sce->ics.max_sfb;
+ const int run_bits = sce->ics.num_windows == 1 ? 5 : 3; // section length field width: 5 bits long windows, 3 short
+ const int run_esc = (1 << run_bits) - 1;
+ int idx, ppos, count;
+ int stackrun[120], stackcb[120], stack_len;
+ float next_minrd = INFINITY;
+ int next_mincb = 0;
+
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+ start = win*128;
+ for (cb = 0; cb < 12; cb++) {
+ path[0][cb].cost = run_bits+4; // unlike the RD variant, seed with the first section-header cost
+ path[0][cb].prev_idx = -1;
+ path[0][cb].run = 0;
+ }
+ for (swb = 0; swb < max_sfb; swb++) {
+ start2 = start;
+ size = sce->ics.swb_sizes[swb];
+ if (sce->zeroes[win*16 + swb]) { // zeroed band: extend every run for free
+ for (cb = 0; cb < 12; cb++) {
+ path[swb+1][cb].prev_idx = cb;
+ path[swb+1][cb].cost = path[swb][cb].cost;
+ path[swb+1][cb].run = path[swb][cb].run + 1;
+ }
+ } else {
+ float minrd = next_minrd;
+ int mincb = next_mincb;
+ int startcb = sce->band_type[win*16+swb]; // minimum codebook able to hold this band's values (chosen earlier)
+ next_minrd = INFINITY;
+ next_mincb = 0;
+ for (cb = 0; cb < startcb; cb++) { // codebooks below the minimum cannot represent the band: poison them
+ path[swb+1][cb].cost = 61450;
+ path[swb+1][cb].prev_idx = -1;
+ path[swb+1][cb].run = 0;
+ }
+ for (cb = startcb; cb < 12; cb++) {
+ float cost_stay_here, cost_get_here;
+ float rd = 0.0f;
+ for (w = 0; w < group_len; w++) {
+ rd += quantize_band_cost(s, sce->coeffs + start + w*128,
+ s->scoefs + start + w*128, size,
+ sce->sf_idx[(win+w)*16+swb], cb,
+ 0, INFINITY, NULL); // lambda 0: count bits only, ignore distortion
+ }
+ cost_stay_here = path[swb][cb].cost + rd;
+ cost_get_here = minrd + rd + run_bits + 4; // switching pays a section header
+ if ( run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run]
+ != run_value_bits[sce->ics.num_windows == 8][path[swb][cb].run+1])
+ cost_stay_here += run_bits; // run crosses an escape boundary
+ if (cost_get_here < cost_stay_here) {
+ path[swb+1][cb].prev_idx = mincb;
+ path[swb+1][cb].cost = cost_get_here;
+ path[swb+1][cb].run = 1;
+ } else {
+ path[swb+1][cb].prev_idx = cb;
+ path[swb+1][cb].cost = cost_stay_here;
+ path[swb+1][cb].run = path[swb][cb].run + 1;
+ }
+ if (path[swb+1][cb].cost < next_minrd) {
+ next_minrd = path[swb+1][cb].cost;
+ next_mincb = cb;
+ }
+ }
+ }
+ start += sce->ics.swb_sizes[swb];
+ }
+
+ //convert resulting path from backward-linked list
+ stack_len = 0;
+ idx = 0;
+ for (cb = 1; cb < 12; cb++)
+ if (path[max_sfb][cb].cost < path[max_sfb][idx].cost)
+ idx = cb;
+ ppos = max_sfb;
+ while (ppos > 0) {
+ assert(idx >= 0); // poisoned states have prev_idx -1; a valid best path never lands on one
+ cb = idx;
+ stackrun[stack_len] = path[ppos][cb].run;
+ stackcb [stack_len] = cb;
+ idx = path[ppos-path[ppos][cb].run+1][cb].prev_idx;
+ ppos -= path[ppos][cb].run;
+ stack_len++;
+ }
+ //perform actual band info encoding
+ start = 0;
+ for (i = stack_len - 1; i >= 0; i--) { // stack is backwards, so emit in reverse
+ put_bits(&s->pb, 4, stackcb[i]);
+ count = stackrun[i];
+ memset(sce->zeroes + win*16 + start, !stackcb[i], count); // codebook 0 marks the section as zeroed
+ //XXX: memset when band_type is also uint8_t
+ for (j = 0; j < count; j++) {
+ sce->band_type[win*16 + start] = stackcb[i];
+ start++;
+ }
+ while (count >= run_esc) { // section length escape coding
+ put_bits(&s->pb, run_bits, run_esc);
+ count -= run_esc;
+ }
+ put_bits(&s->pb, run_bits, count);
+ }
+}
+
+/** Return the minimum scalefactor where the quantized coef does not clip. */
+static av_always_inline uint8_t coef2minsf(float coef) { // see block comment above: clipping bound, derived from log2 of the coefficient
+ return av_clip_uint8(log2f(coef)*4 - 69 + SCALE_ONE_POS - SCALE_DIV_512);
+}
+
+/** Return the maximum scalefactor where the quantized coef is not zero. */
+static av_always_inline uint8_t coef2maxsf(float coef) {
+ return av_clip_uint8(log2f(coef)*4 + 6 + SCALE_ONE_POS - SCALE_DIV_512);
+}
+
+typedef struct TrellisPath {
+ float cost; ///< accumulated cost of the best path ending in this state
+ int prev; ///< scalefactor state index in the previous stage, or negative if none
+} TrellisPath;
+
+#define TRELLIS_STAGES 121 // one stage per scalefactor band plus the start state (120 max bands + 1)
+#define TRELLIS_STATES (SCALE_MAX_DIFF+1) // scalefactors reachable within the allowed delta range
+
+static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s, // trellis (Viterbi) search for per-band scalefactors minimizing RD cost
+ SingleChannelElement *sce,
+ const float lambda)
+{
+ int q, w, w2, g, start = 0;
+ int i, j;
+ int idx;
+ TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES]; // [band stage][scalefactor offset from q0]
+ int bandaddr[TRELLIS_STAGES]; // maps trellis stage back to its sf_idx slot (w*16+g)
+ int minq;
+ float mincost;
+ float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f;
+ int q0, q1, qcnt = 0;
+
+ for (i = 0; i < 1024; i++) { // scan all coefficients for min/max magnitude and total energy
+ float t = fabsf(sce->coeffs[i]);
+ if (t > 0.0f) {
+ q0f = FFMIN(q0f, t);
+ q1f = FFMAX(q1f, t);
+ qnrgf += t*t;
+ qcnt++;
+ }
+ }
+
+ if (!qcnt) { // completely silent frame: zero everything and bail
+ memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
+ memset(sce->zeroes, 1, sizeof(sce->zeroes));
+ return;
+ }
+
+ //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped
+ q0 = coef2minsf(q0f);
+ //maximum scalefactor index is when maximum coefficient after quantizing is still not zero
+ q1 = coef2maxsf(q1f);
+ //av_log(NULL, AV_LOG_ERROR, "q0 %d, q1 %d\n", q0, q1);
+ if (q1 - q0 > 60) { // search window wider than the trellis supports: recenter around the energy-based estimate
+ int q0low = q0;
+ int q1high = q1;
+ //minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped
+ int qnrg = av_clip_uint8(log2f(sqrtf(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512);
+ q1 = qnrg + 30;
+ q0 = qnrg - 30;
+ //av_log(NULL, AV_LOG_ERROR, "q0 %d, q1 %d\n", q0, q1);
+ if (q0 < q0low) { // shift the 61-wide window back inside the valid range
+ q1 += q0low - q0;
+ q0 = q0low;
+ } else if (q1 > q1high) {
+ q0 -= q1 - q1high;
+ q1 = q1high;
+ }
+ }
+ //av_log(NULL, AV_LOG_ERROR, "q0 %d, q1 %d\n", q0, q1);
+
+ for (i = 0; i < TRELLIS_STATES; i++) { // start stage: all states reachable for free
+ paths[0][i].cost = 0.0f;
+ paths[0][i].prev = -1;
+ }
+ for (j = 1; j < TRELLIS_STAGES; j++) { // later stages start unreachable
+ for (i = 0; i < TRELLIS_STATES; i++) {
+ paths[j][i].cost = INFINITY;
+ paths[j][i].prev = -2;
+ }
+ }
+ idx = 1;
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) { // one trellis stage per scalefactor band
+ const float *coefs = sce->coeffs + start;
+ float qmin, qmax;
+ int nz = 0;
+
+ bandaddr[idx] = w * 16 + g;
+ qmin = INT_MAX;
+ qmax = 0.0f;
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { // inspect every window of the group
+ FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
+ if (band->energy <= band->threshold || band->threshold == 0.0f) { // psy model says inaudible: zero this window's band
+ sce->zeroes[(w+w2)*16+g] = 1;
+ continue;
+ }
+ sce->zeroes[(w+w2)*16+g] = 0;
+ nz = 1;
+ for (i = 0; i < sce->ics.swb_sizes[g]; i++) {
+ float t = fabsf(coefs[w2*128+i]);
+ if (t > 0.0f)
+ qmin = FFMIN(qmin, t);
+ qmax = FFMAX(qmax, t);
+ }
+ }
+ if (nz) {
+ int minscale, maxscale;
+ float minrd = INFINITY;
+ float maxval;
+ //minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped
+ minscale = coef2minsf(qmin);
+ //maximum scalefactor index is when maximum coefficient after quantizing is still not zero
+ maxscale = coef2maxsf(qmax);
+ minscale = av_clip(minscale - q0, 0, TRELLIS_STATES - 1); // convert to trellis-state offsets relative to q0
+ maxscale = av_clip(maxscale - q0, 0, TRELLIS_STATES);
+ maxval = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], s->scoefs+start);
+ for (q = minscale; q < maxscale; q++) { // evaluate each candidate scalefactor state
+ float dist = 0;
+ int cb = find_min_book(maxval, sce->sf_idx[w*16+g]);
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
+ dist += quantize_band_cost(s, coefs + w2*128, s->scoefs + start + w2*128, sce->ics.swb_sizes[g],
+ q + q0, cb, lambda / band->threshold, INFINITY, NULL);
+ }
+ minrd = FFMIN(minrd, dist);
+
+ for (i = 0; i < q1 - q0; i++) { // relax all transitions into state q
+ float cost;
+ cost = paths[idx - 1][i].cost + dist
+ + ff_aac_scalefactor_bits[q - i + SCALE_DIFF_ZERO]; // differential scalefactor coding cost
+ if (cost < paths[idx][q].cost) {
+ paths[idx][q].cost = cost;
+ paths[idx][q].prev = i;
+ }
+ }
+ }
+ } else {
+ for (q = 0; q < q1 - q0; q++) { // zero band: carry every state forward at a 1-bit nominal cost
+ paths[idx][q].cost = paths[idx - 1][q].cost + 1;
+ paths[idx][q].prev = q;
+ }
+ }
+ sce->zeroes[w*16+g] = !nz;
+ start += sce->ics.swb_sizes[g];
+ idx++;
+ }
+ }
+ idx--;
+ mincost = paths[idx][0].cost; // pick the cheapest terminal state
+ minq = 0;
+ for (i = 1; i < TRELLIS_STATES; i++) {
+ if (paths[idx][i].cost < mincost) {
+ mincost = paths[idx][i].cost;
+ minq = i;
+ }
+ }
+ while (idx) { // backtrack, writing the winning scalefactor per band
+ sce->sf_idx[bandaddr[idx]] = minq + q0;
+ minq = paths[idx][minq].prev;
+ idx--;
+ }
+ //set the same quantizers inside window groups
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w])
+ for (g = 0; g < sce->ics.num_swb; g++)
+ for (w2 = 1; w2 < sce->ics.group_len[w]; w2++)
+ sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g];
+}
+
+/**
+ * two-loop quantizers search taken from ISO 13818-7 Appendix C
+ */
+static void search_for_quantizers_twoloop(AVCodecContext *avctx, // two-loop search: inner loop fits the bit budget, outer loop improves quality
+ AACEncContext *s,
+ SingleChannelElement *sce,
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels; // per-channel bit budget for one 1024-sample frame
+ float dists[128], uplims[128];
+ float maxvals[128];
+ int fflag, minscaler;
+ int its = 0;
+ int allz = 0;
+ float minthr = INFINITY;
+
+ //XXX: some heuristic to determine initial quantizers will reduce search time
+ memset(dists, 0, sizeof(dists));
+ //determine zero bands and upper limits
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ int nz = 0;
+ float uplim = 0.0f;
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
+ uplim += band->threshold; // allowed distortion is the summed psy thresholds of the group
+ if (band->energy <= band->threshold || band->threshold == 0.0f) {
+ sce->zeroes[(w+w2)*16+g] = 1;
+ continue;
+ }
+ nz = 1;
+ }
+ uplims[w*16+g] = uplim *512;
+ sce->zeroes[w*16+g] = !nz;
+ if (nz)
+ minthr = FFMIN(minthr, uplim);
+ allz |= nz;
+ }
+ }
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { // initial scalefactor guess from threshold ratios
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ if (sce->zeroes[w*16+g]) {
+ sce->sf_idx[w*16+g] = SCALE_ONE_POS;
+ continue;
+ }
+ sce->sf_idx[w*16+g] = SCALE_ONE_POS + FFMIN(log2f(uplims[w*16+g]/minthr)*4,59);
+ }
+ }
+
+ if (!allz) // every band zeroed: nothing to search
+ return;
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { // cache per-band max of |coef|^(3/4) for codebook selection
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *scaled = s->scoefs + start;
+ maxvals[w*16+g] = find_max_val(sce->ics.group_len[w], sce->ics.swb_sizes[g], scaled);
+ start += sce->ics.swb_sizes[g];
+ }
+ }
+
+ //perform two-loop search
+ //outer loop - improve quality
+ do {
+ int tbits, qstep;
+ minscaler = sce->sf_idx[0];
+ //inner loop - quantize spectrum to fit into given number of bits
+ qstep = its ? 1 : 32; // first pass takes big steps, later passes refine
+ do {
+ int prev = -1;
+ tbits = 0;
+ fflag = 0;
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *coefs = sce->coeffs + start;
+ const float *scaled = s->scoefs + start;
+ int bits = 0;
+ int cb;
+ float dist = 0.0f;
+
+ if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) { // 218 is treated as "effectively zero" here
+ start += sce->ics.swb_sizes[g];
+ continue;
+ }
+ minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]);
+ cb = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ int b;
+ dist += quantize_band_cost(s, coefs + w2*128,
+ scaled + w2*128,
+ sce->ics.swb_sizes[g],
+ sce->sf_idx[w*16+g],
+ cb,
+ 1.0f,
+ INFINITY,
+ &b);
+ bits += b;
+ }
+ dists[w*16+g] = dist - bits; // cost came back as dist*lambda + bits with lambda=1; recover pure distortion
+ if (prev != -1) {
+ bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO]; // differential sf coding cost
+ }
+ tbits += bits;
+ start += sce->ics.swb_sizes[g];
+ prev = sce->sf_idx[w*16+g];
+ }
+ }
+ if (tbits > destbits) { // over budget: coarser quantization everywhere
+ for (i = 0; i < 128; i++)
+ if (sce->sf_idx[i] < 218 - qstep)
+ sce->sf_idx[i] += qstep;
+ } else { // under budget: spend bits on finer quantization
+ for (i = 0; i < 128; i++)
+ if (sce->sf_idx[i] > 60 - qstep)
+ sce->sf_idx[i] -= qstep;
+ }
+ qstep >>= 1; // binary-search style step halving
+ if (!qstep && tbits > destbits*1.02 && sce->sf_idx[0] < 217)
+ qstep = 1; // still noticeably over budget: keep stepping
+ } while (qstep);
+
+ fflag = 0;
+ minscaler = av_clip(minscaler, 60, 255 - SCALE_MAX_DIFF);
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { // outer loop: lower sf where distortion exceeds the psy limit
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ int prevsc = sce->sf_idx[w*16+g];
+ if (dists[w*16+g] > uplims[w*16+g] && sce->sf_idx[w*16+g] > 60) {
+ if (find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]-1))
+ sce->sf_idx[w*16+g]--;
+ else //Try to make sure there is some energy in every band
+ sce->sf_idx[w*16+g]-=2;
+ }
+ sce->sf_idx[w*16+g] = av_clip(sce->sf_idx[w*16+g], minscaler, minscaler + SCALE_MAX_DIFF); // keep sf deltas codable
+ sce->sf_idx[w*16+g] = FFMIN(sce->sf_idx[w*16+g], 219);
+ if (sce->sf_idx[w*16+g] != prevsc)
+ fflag = 1; // something changed, iterate again
+ sce->band_type[w*16+g] = find_min_book(maxvals[w*16+g], sce->sf_idx[w*16+g]);
+ }
+ }
+ its++;
+ } while (fflag && its < 10); // hard cap on outer iterations
+}
+
+static void search_for_quantizers_faac(AVCodecContext *avctx, AACEncContext *s, // FAAC-style heuristic search: per-band distortion targets, then binary-search the scalefactor
+ SingleChannelElement *sce,
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ float uplim[128], maxq[128];
+ int minq, maxsf;
+ float distfact = ((sce->ics.num_windows > 1) ? 85.80 : 147.84) / lambda; // empirical distortion factors (short vs long windows)
+ int last = 0, lastband = 0, curband = 0;
+ float avg_energy = 0.0;
+ if (sce->ics.num_windows == 1) { // long window: scan the whole 1024-sample spectrum
+ start = 0;
+ for (i = 0; i < 1024; i++) {
+ if (i - start >= sce->ics.swb_sizes[curband]) {
+ start += sce->ics.swb_sizes[curband];
+ curband++;
+ }
+ if (sce->coeffs[i]) {
+ avg_energy += sce->coeffs[i] * sce->coeffs[i];
+ last = i; // remember last nonzero coefficient and its band
+ lastband = curband;
+ }
+ }
+ } else { // short windows: scan each 128-sample window separately
+ for (w = 0; w < 8; w++) {
+ const float *coeffs = sce->coeffs + w*128;
+ start = 0;
+ for (i = 0; i < 128; i++) {
+ if (i - start >= sce->ics.swb_sizes[curband]) {
+ start += sce->ics.swb_sizes[curband];
+ curband++;
+ }
+ if (coeffs[i]) {
+ avg_energy += coeffs[i] * coeffs[i];
+ last = FFMAX(last, i);
+ lastband = FFMAX(lastband, curband);
+ }
+ }
+ }
+ }
+ last++;
+ avg_energy /= last; // mean energy over the occupied part of the spectrum
+ if (avg_energy == 0.0f) { // silence: neutral scalefactors and done
+ for (i = 0; i < FF_ARRAY_ELEMS(sce->sf_idx); i++)
+ sce->sf_idx[i] = SCALE_ONE_POS;
+ return;
+ }
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { // derive per-band distortion limit uplim[] and peak magnitude maxq[]
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ float *coefs = sce->coeffs + start;
+ const int size = sce->ics.swb_sizes[g];
+ int start2 = start, end2 = start + size, peakpos = start;
+ float maxval = -1, thr = 0.0f, t;
+ maxq[w*16+g] = 0.0f;
+ if (g > lastband) { // band entirely above the last nonzero coefficient: clear it
+ maxq[w*16+g] = 0.0f;
+ start += size;
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++)
+ memset(coefs + w2*128, 0, sizeof(coefs[0])*size);
+ continue;
+ }
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ for (i = 0; i < size; i++) {
+ float t = coefs[w2*128+i]*coefs[w2*128+i];
+ maxq[w*16+g] = FFMAX(maxq[w*16+g], fabsf(coefs[w2*128 + i]));
+ thr += t; // band energy
+ if (sce->ics.num_windows == 1 && maxval < t) {
+ maxval = t;
+ peakpos = start+i; // position of the energy peak inside the band
+ }
+ }
+ }
+ if (sce->ics.num_windows == 1) { // narrow the window to around the peak for the threshold estimate
+ start2 = FFMAX(peakpos - 2, start2);
+ end2 = FFMIN(peakpos + 3, end2);
+ } else {
+ start2 -= start;
+ end2 -= start;
+ }
+ start += size;
+ thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband); // empirical loudness weighting, stronger for low bands
+ t = 1.0 - (1.0 * start2 / last);
+ uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075); // per-band allowed distortion
+ }
+ }
+ memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
+ abs_pow34_v(s->scoefs, sce->coeffs, 1024);
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ const float *coefs = sce->coeffs + start;
+ const float *scaled = s->scoefs + start;
+ const int size = sce->ics.swb_sizes[g];
+ int scf, prev_scf, step;
+ int min_scf = -1, max_scf = 256; // shrinking bracket for the binary search
+ float curdiff;
+ if (maxq[w*16+g] < 21.544) { // band too quiet to bother coding (empirical cutoff)
+ sce->zeroes[w*16+g] = 1;
+ start += size;
+ continue;
+ }
+ sce->zeroes[w*16+g] = 0;
+ scf = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218); // initial sf estimate from peak magnitude
+ step = 16;
+ for (;;) { // iterate sf until distortion lands near uplim or the bracket closes
+ float dist = 0.0f;
+ int quant_max;
+
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ int b;
+ dist += quantize_band_cost(s, coefs + w2*128,
+ scaled + w2*128,
+ sce->ics.swb_sizes[g],
+ scf,
+ ESC_BT,
+ lambda,
+ INFINITY,
+ &b);
+ dist -= b; // strip the bit term to get pure distortion
+ }
+ dist *= 1.0f / 512.0f / lambda;
+ quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[200 - scf + SCALE_ONE_POS - SCALE_DIV_512]);
+ if (quant_max >= 8191) { // too much, return to the previous quantizer
+ sce->sf_idx[w*16+g] = prev_scf;
+ break;
+ }
+ prev_scf = scf;
+ curdiff = fabsf(dist - uplim[w*16+g]);
+ if (curdiff <= 1.0f) // close enough to the target: stop stepping
+ step = 0;
+ else
+ step = log2f(curdiff); // step size proportional to how far off we are
+ if (dist > uplim[w*16+g])
+ step = -step; // too much distortion: lower the scalefactor
+ scf += step;
+ scf = av_clip_uint8(scf);
+ step = scf - prev_scf;
+ if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) { // converged or bracket exhausted
+ sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf);
+ break;
+ }
+ if (step > 0) // tighten the bracket around the answer
+ min_scf = prev_scf;
+ else
+ max_scf = prev_scf;
+ }
+ start += size;
+ }
+ }
+ minq = sce->sf_idx[0] ? sce->sf_idx[0] : INT_MAX;
+ for (i = 1; i < 128; i++) { // forward fill: give skipped bands their left neighbor's sf, track the minimum
+ if (!sce->sf_idx[i])
+ sce->sf_idx[i] = sce->sf_idx[i-1];
+ else
+ minq = FFMIN(minq, sce->sf_idx[i]);
+ }
+ if (minq == INT_MAX)
+ minq = 0;
+ minq = FFMIN(minq, SCALE_MAX_POS);
+ maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS); // keep all sf deltas within the codable range
+ for (i = 126; i >= 0; i--) { // backward fill and clamp into [minq, maxsf]
+ if (!sce->sf_idx[i])
+ sce->sf_idx[i] = sce->sf_idx[i+1];
+ sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf);
+ }
+}
+
+static void search_for_quantizers_fast(AVCodecContext *avctx, AACEncContext *s, // cheapest strategy: derive sf straight from psy thresholds, no search
+ SingleChannelElement *sce,
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ int minq = 255;
+
+ memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = w*128;
+ for (g = 0; g < sce->ics.num_swb; g++) {
+ for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
+ FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g];
+ if (band->energy <= band->threshold) { // inaudible band: mark zero
+ sce->sf_idx[(w+w2)*16+g] = 218;
+ sce->zeroes[(w+w2)*16+g] = 1;
+ } else {
+ sce->sf_idx[(w+w2)*16+g] = av_clip(SCALE_ONE_POS - SCALE_DIV_512 + log2f(band->threshold), 80, 218); // sf from the threshold directly
+ sce->zeroes[(w+w2)*16+g] = 0;
+ }
+ minq = FFMIN(minq, sce->sf_idx[(w+w2)*16+g]);
+ }
+ }
+ }
+ for (i = 0; i < 128; i++) { // NOTE(review): this overwrites the computed sf with a fixed 140 -- the clip below is commented out upstream
+ sce->sf_idx[i] = 140;
+ //av_clip(sce->sf_idx[i], minq, minq + SCALE_MAX_DIFF - 1);
+ }
+ //set the same quantizers inside window groups
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w])
+ for (g = 0; g < sce->ics.num_swb; g++)
+ for (w2 = 1; w2 < sce->ics.group_len[w]; w2++)
+ sce->sf_idx[(w+w2)*16+g] = sce->sf_idx[w*16+g];
+}
+
+static void search_for_ms(AACEncContext *s, ChannelElement *cpe, // per-band Mid/Side decision: use M/S where it codes cheaper than L/R
+ const float lambda)
+{
+ int start = 0, i, w, w2, g;
+ float M[128], S[128]; // mid and side signals for one band
+ float *L34 = s->scoefs, *R34 = s->scoefs + 128, *M34 = s->scoefs + 128*2, *S34 = s->scoefs + 128*3; // |x|^(3/4) scratch slices for each signal
+ SingleChannelElement *sce0 = &cpe->ch[0];
+ SingleChannelElement *sce1 = &cpe->ch[1];
+ if (!cpe->common_window) // M/S requires both channels to share window configuration
+ return;
+ for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) {
+ for (g = 0; g < sce0->ics.num_swb; g++) {
+ if (!cpe->ch[0].zeroes[w*16+g] && !cpe->ch[1].zeroes[w*16+g]) { // only consider bands coded in both channels
+ float dist1 = 0.0f, dist2 = 0.0f; // dist1 = cost of L/R coding, dist2 = cost of M/S coding
+ for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) {
+ FFPsyBand *band0 = &s->psy.psy_bands[(s->cur_channel+0)*PSY_MAX_BANDS+(w+w2)*16+g];
+ FFPsyBand *band1 = &s->psy.psy_bands[(s->cur_channel+1)*PSY_MAX_BANDS+(w+w2)*16+g];
+ float minthr = FFMIN(band0->threshold, band1->threshold);
+ float maxthr = FFMAX(band0->threshold, band1->threshold);
+ for (i = 0; i < sce0->ics.swb_sizes[g]; i++) {
+ M[i] = (sce0->coeffs[start+w2*128+i]
+ + sce1->coeffs[start+w2*128+i]) * 0.5; // mid = (L+R)/2
+ S[i] = M[i]
+ - sce1->coeffs[start+w2*128+i]; // side = M - R = (L-R)/2
+ }
+ abs_pow34_v(L34, sce0->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(R34, sce1->coeffs+start+w2*128, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(M34, M, sce0->ics.swb_sizes[g]);
+ abs_pow34_v(S34, S, sce0->ics.swb_sizes[g]);
+ dist1 += quantize_band_cost(s, sce0->coeffs + start + w2*128, // left channel, its own threshold
+ L34,
+ sce0->ics.swb_sizes[g],
+ sce0->sf_idx[(w+w2)*16+g],
+ sce0->band_type[(w+w2)*16+g],
+ lambda / band0->threshold, INFINITY, NULL);
+ dist1 += quantize_band_cost(s, sce1->coeffs + start + w2*128, // right channel, its own threshold
+ R34,
+ sce1->ics.swb_sizes[g],
+ sce1->sf_idx[(w+w2)*16+g],
+ sce1->band_type[(w+w2)*16+g],
+ lambda / band1->threshold, INFINITY, NULL);
+ dist2 += quantize_band_cost(s, M, // mid judged against the looser threshold
+ M34,
+ sce0->ics.swb_sizes[g],
+ sce0->sf_idx[(w+w2)*16+g],
+ sce0->band_type[(w+w2)*16+g],
+ lambda / maxthr, INFINITY, NULL);
+ dist2 += quantize_band_cost(s, S, // side judged against the tighter threshold
+ S34,
+ sce1->ics.swb_sizes[g],
+ sce1->sf_idx[(w+w2)*16+g],
+ sce1->band_type[(w+w2)*16+g],
+ lambda / minthr, INFINITY, NULL);
+ }
+ cpe->ms_mask[w*16+g] = dist2 < dist1; // enable M/S for this band when it wins
+ }
+ start += sce0->ics.swb_sizes[g];
+ }
+ }
+}
+
+AACCoefficientsEncoder ff_aac_coders[] = { // coder strategy table; entries differ only in the quantizer-search and band-info functions
+ {
+ search_for_quantizers_faac, // FAAC-style heuristic search
+ encode_window_bands_info,
+ quantize_and_encode_band,
+ search_for_ms,
+ },
+ {
+ search_for_quantizers_anmr, // trellis / Viterbi scalefactor search (slow, high quality)
+ encode_window_bands_info,
+ quantize_and_encode_band,
+ search_for_ms,
+ },
+ {
+ search_for_quantizers_twoloop, // ISO 13818-7 Appendix C two-loop search
+ codebook_trellis_rate, // pairs with the rate-only codebook trellis
+ quantize_and_encode_band,
+ search_for_ms,
+ },
+ {
+ search_for_quantizers_fast, // threshold-derived scalefactors, no search
+ encode_window_bands_info,
+ quantize_and_encode_band,
+ search_for_ms,
+ },
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacdec.c b/samples/rtsp_player/ffmpeg/libavcodec/aacdec.c
new file mode 100755
index 0000000..7b1e501
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacdec.c
@@ -0,0 +1,2552 @@
+/*
+ * AAC decoder
+ * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
+ * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * AAC LATM decoder
+ * Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
+ * Copyright (c) 2010 Janne Grunau <janne-ffmpeg@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC decoder
+ * @author Oded Shimon ( ods15 ods15 dyndns org )
+ * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
+ */
+
+/*
+ * supported tools
+ *
+ * Support? Name
+ * N (code in SoC repo) gain control
+ * Y block switching
+ * Y window shapes - standard
+ * N window shapes - Low Delay
+ * Y filterbank - standard
+ * N (code in SoC repo) filterbank - Scalable Sample Rate
+ * Y Temporal Noise Shaping
+ * Y Long Term Prediction
+ * Y intensity stereo
+ * Y channel coupling
+ * Y frequency domain prediction
+ * Y Perceptual Noise Substitution
+ * Y Mid/Side stereo
+ * N Scalable Inverse AAC Quantization
+ * N Frequency Selective Switch
+ * N upsampling filter
+ * Y quantization & coding - AAC
+ * N quantization & coding - TwinVQ
+ * N quantization & coding - BSAC
+ * N AAC Error Resilience tools
+ * N Error Resilience payload syntax
+ * N Error Protection tool
+ * N CELP
+ * N Silence Compression
+ * N HVXC
+ * N HVXC 4kbits/s VR
+ * N Structured Audio tools
+ * N Structured Audio Sample Bank Format
+ * N MIDI
+ * N Harmonic and Individual Lines plus Noise
+ * N Text-To-Speech Interface
+ * Y Spectral Band Replication
+ * Y (not in this code) Layer-1
+ * Y (not in this code) Layer-2
+ * Y (not in this code) Layer-3
+ * N SinuSoidal Coding (Transient, Sinusoid, Noise)
+ * Y Parametric Stereo
+ * N Direct Stream Transfer
+ *
+ * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
+ * - HE AAC v2 comprises LC AAC with Spectral Band Replication and
+ Parametric Stereo.
+ */
+
+
+#include "avcodec.h"
+#include "internal.h"
+#include "get_bits.h"
+#include "dsputil.h"
+#include "fft.h"
+#include "fmtconvert.h"
+#include "lpc.h"
+#include "kbdwin.h"
+#include "sinewin.h"
+
+#include "aac.h"
+#include "aactab.h"
+#include "aacdectab.h"
+#include "cbrt_tablegen.h"
+#include "sbr.h"
+#include "aacsbr.h"
+#include "mpeg4audio.h"
+#include "aacadtsdec.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <math.h>
+#include <string.h>
+
+#if ARCH_ARM
+# include "arm/aac.h"
+#endif
+
+/* Type-punning helper: view the bit pattern of a 32-bit float as a
+ * uint32_t. Used by VMUL2S/VMUL4S below for branchless sign flips
+ * (XOR into the float's sign bit). */
+union float754 {
+ float f;
+ uint32_t i;
+};
+
+/* Decoder-global VLC tables, filled once via INIT_VLC_STATIC in
+ * aac_decode_init() and then shared read-only by all instances. */
+static VLC vlc_scalefactors;
+static VLC vlc_spectral[11];
+
+// Common error message for bitstream overread conditions.
+static const char overread_err[] = "Input buffer exhausted before END element found\n";
+
+/**
+ * Map a raw syntax element (type, elem_id) to its channel element.
+ * PCE-based configurations use the tag map directly; indexed
+ * configurations assign elements in order of appearance by counting
+ * ac->tags_mapped. Every switch case intentionally falls through to
+ * the next lower configuration so e.g. config 7 also applies the
+ * rules for 6, 5, ... down to 1.
+ *
+ * @return the mapped ChannelElement, or NULL if the element does not
+ * fit the declared channel configuration.
+ */
+static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
+{
+ // For PCE based channel configurations map the channels solely based on tags.
+ if (!ac->m4ac.chan_config) {
+ return ac->tag_che_map[type][elem_id];
+ }
+ // For indexed channel configurations map the channels solely based on position.
+ switch (ac->m4ac.chan_config) {
+ case 7:
+ if (ac->tags_mapped == 3 && type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
+ }
+ /* fall through */
+ case 6:
+ /* Some streams incorrectly code 5.1 audio as SCE[0] CPE[0] CPE[1] SCE[1]
+ instead of SCE[0] CPE[0] CPE[1] LFE[0]. If we seem to have
+ encountered such a stream, transfer the LFE[0] element to the SCE[1]'s mapping */
+ if (ac->tags_mapped == tags_per_config[ac->m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
+ }
+ /* fall through */
+ case 5:
+ if (ac->tags_mapped == 2 && type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
+ }
+ /* fall through */
+ case 4:
+ if (ac->tags_mapped == 2 && ac->m4ac.chan_config == 4 && type == TYPE_SCE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
+ }
+ /* fall through */
+ case 3:
+ case 2:
+ if (ac->tags_mapped == (ac->m4ac.chan_config != 2) && type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
+ } else if (ac->m4ac.chan_config == 2) {
+ return NULL;
+ }
+ /* fall through */
+ case 1:
+ if (!ac->tags_mapped && type == TYPE_SCE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
+ }
+ /* fall through */
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * Check for the channel element in the current channel position configuration.
+ * If it exists, make sure the appropriate element is allocated and map the
+ * channel order to match the internal FFmpeg channel layout.
+ *
+ * Allocation is lazy: an already-allocated element is reused; an element
+ * no longer present in the configuration is freed (and its SBR context
+ * closed).
+ *
+ * @param che_pos current channel position configuration
+ * @param type channel element type
+ * @param id channel element id
+ * @param channels count of the number of channels in the configuration
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static av_cold int che_configure(AACContext *ac,
+ enum ChannelPosition che_pos[4][MAX_ELEM_ID],
+ int type, int id, int *channels)
+{
+ if (che_pos[type][id]) {
+ if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
+ return AVERROR(ENOMEM);
+ ff_aac_sbr_ctx_init(&ac->che[type][id]->sbr);
+ if (type != TYPE_CCE) {
+ /* register the element's output buffer(s); a CPE — or an SCE
+ * when PS is active — contributes two channels */
+ ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
+ if (type == TYPE_CPE ||
+ (type == TYPE_SCE && ac->m4ac.ps == 1)) {
+ ac->output_data[(*channels)++] = ac->che[type][id]->ch[1].ret;
+ }
+ }
+ } else {
+ /* element not present in this configuration: free any stale one */
+ if (ac->che[type][id])
+ ff_aac_sbr_ctx_close(&ac->che[type][id]->sbr);
+ av_freep(&ac->che[type][id]);
+ }
+ return 0;
+}
+
+/**
+ * Configure output channel order based on the current program configuration element.
+ *
+ * @param che_pos current channel position configuration
+ * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
+ * @param channel_config pre-defined channel configuration index, or 0 for PCE-based configs
+ * @param oc_type status value stored in ac->output_configured on success
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static av_cold int output_configure(AACContext *ac,
+ enum ChannelPosition che_pos[4][MAX_ELEM_ID],
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ int channel_config, enum OCStatus oc_type)
+{
+ AVCodecContext *avctx = ac->avctx;
+ int i, type, channels = 0, ret;
+
+ if (new_che_pos != che_pos)
+ memcpy(che_pos, new_che_pos, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
+
+ if (channel_config) {
+ /* indexed configuration: element list and layout come from fixed tables */
+ for (i = 0; i < tags_per_config[channel_config]; i++) {
+ if ((ret = che_configure(ac, che_pos,
+ aac_channel_layout_map[channel_config - 1][i][0],
+ aac_channel_layout_map[channel_config - 1][i][1],
+ &channels)))
+ return ret;
+ }
+
+ memset(ac->tag_che_map, 0, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+
+ avctx->channel_layout = aac_channel_layout[channel_config - 1];
+ } else {
+ /* Allocate or free elements depending on if they are in the
+ * current program configuration.
+ *
+ * Set up default 1:1 output mapping.
+ *
+ * For a 5.1 stream the output order will be:
+ * [ Center ] [ Front Left ] [ Front Right ] [ LFE ] [ Surround Left ] [ Surround Right ]
+ */
+
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ for (type = 0; type < 4; type++) {
+ if ((ret = che_configure(ac, che_pos, type, i, &channels)))
+ return ret;
+ }
+ }
+
+ memcpy(ac->tag_che_map, ac->che, 4 * MAX_ELEM_ID * sizeof(ac->che[0][0]));
+
+ avctx->channel_layout = 0;
+ }
+
+ avctx->channels = channels;
+
+ ac->output_configured = oc_type;
+
+ return 0;
+}
+
+/**
+ * Decode an array of 4 bit element IDs, optionally interleaved with a stereo/mono switching bit.
+ *
+ * @param cpe_map Stereo (Channel Pair Element) map, NULL if stereo bit is not present.
+ * @param sce_map mono (Single Channel Element) map
+ * @param type speaker type/position for these channels
+ * @param n number of 4-bit element IDs to read
+ */
+static void decode_channel_map(enum ChannelPosition *cpe_map,
+ enum ChannelPosition *sce_map,
+ enum ChannelPosition type,
+ GetBitContext *gb, int n)
+{
+ while (n--) {
+ enum ChannelPosition *map = cpe_map && get_bits1(gb) ? cpe_map : sce_map; // stereo or mono map
+ map[get_bits(gb, 4)] = type;
+ }
+}
+
+/**
+ * Decode program configuration element; reference: table 4.2.
+ *
+ * Only the channel maps are retained; mixdown info, associated data
+ * elements and the trailing comment field are parsed and discarded.
+ *
+ * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ GetBitContext *gb)
+{
+ int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc, sampling_index;
+ int comment_len;
+
+ skip_bits(gb, 2); // object_type
+
+ sampling_index = get_bits(gb, 4);
+ if (m4ac->sampling_index != sampling_index)
+ av_log(avctx, AV_LOG_WARNING, "Sample rate index in program config element does not match the sample rate index configured by the container.\n");
+
+ num_front = get_bits(gb, 4);
+ num_side = get_bits(gb, 4);
+ num_back = get_bits(gb, 4);
+ num_lfe = get_bits(gb, 2);
+ num_assoc_data = get_bits(gb, 3);
+ num_cc = get_bits(gb, 4);
+
+ if (get_bits1(gb))
+ skip_bits(gb, 4); // mono_mixdown_tag
+ if (get_bits1(gb))
+ skip_bits(gb, 4); // stereo_mixdown_tag
+
+ if (get_bits1(gb))
+ skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
+
+ decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_FRONT, gb, num_front);
+ decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_SIDE, gb, num_side );
+ decode_channel_map(new_che_pos[TYPE_CPE], new_che_pos[TYPE_SCE], AAC_CHANNEL_BACK, gb, num_back );
+ decode_channel_map(NULL, new_che_pos[TYPE_LFE], AAC_CHANNEL_LFE, gb, num_lfe );
+
+ skip_bits_long(gb, 4 * num_assoc_data);
+
+ decode_channel_map(new_che_pos[TYPE_CCE], new_che_pos[TYPE_CCE], AAC_CHANNEL_CC, gb, num_cc );
+
+ align_get_bits(gb);
+
+ /* comment field, first byte is length */
+ comment_len = get_bits(gb, 8) * 8;
+ if (get_bits_left(gb) < comment_len) {
+ av_log(avctx, AV_LOG_ERROR, overread_err);
+ return -1;
+ }
+ skip_bits_long(gb, comment_len);
+ return 0;
+}
+
+/**
+ * Set up channel positions based on a default channel configuration
+ * as specified in table 1.17.
+ *
+ * @param new_che_pos New channel position configuration - we only do something if it differs from the current one.
+ * @param channel_config pre-defined channel configuration index; only 1-7 are supported
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static av_cold int set_default_channel_config(AVCodecContext *avctx,
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID],
+ int channel_config)
+{
+ if (channel_config < 1 || channel_config > 7) {
+ av_log(avctx, AV_LOG_ERROR, "invalid default channel configuration (%d)\n",
+ channel_config);
+ return -1;
+ }
+
+ /* default channel configurations:
+ *
+ * 1ch : front center (mono)
+ * 2ch : L + R (stereo)
+ * 3ch : front center + L + R
+ * 4ch : front center + L + R + back center
+ * 5ch : front center + L + R + back stereo
+ * 6ch : front center + L + R + back stereo + LFE
+ * 7ch : front center + L + R + outer front left + outer front right + back stereo + LFE
+ */
+
+ if (channel_config != 2)
+ new_che_pos[TYPE_SCE][0] = AAC_CHANNEL_FRONT; // front center (or mono)
+ if (channel_config > 1)
+ new_che_pos[TYPE_CPE][0] = AAC_CHANNEL_FRONT; // L + R (or stereo)
+ if (channel_config == 4)
+ new_che_pos[TYPE_SCE][1] = AAC_CHANNEL_BACK; // back center
+ if (channel_config > 4)
+ new_che_pos[TYPE_CPE][(channel_config == 7) + 1]
+ = AAC_CHANNEL_BACK; // back stereo
+ if (channel_config > 5)
+ new_che_pos[TYPE_LFE][0] = AAC_CHANNEL_LFE; // LFE
+ if (channel_config == 7)
+ new_che_pos[TYPE_CPE][1] = AAC_CHANNEL_FRONT; // outer front left + outer front right
+
+ return 0;
+}
+
+/**
+ * Decode GA "General Audio" specific configuration; reference: table 4.1.
+ *
+ * Rejects the 960-sample frame length, then derives the channel layout
+ * either from an embedded PCE (channel_config == 0) or from the default
+ * table, and finally skips any object-type-specific extension bits.
+ *
+ * @param ac pointer to AACContext, may be null
+ * @param avctx pointer to AVCCodecContext, used for logging
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
+ GetBitContext *gb,
+ MPEG4AudioConfig *m4ac,
+ int channel_config)
+{
+ enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
+ int extension_flag, ret;
+
+ if (get_bits1(gb)) { // frameLengthFlag
+ av_log_missing_feature(avctx, "960/120 MDCT window is", 1);
+ return -1;
+ }
+
+ if (get_bits1(gb)) // dependsOnCoreCoder
+ skip_bits(gb, 14); // coreCoderDelay
+ extension_flag = get_bits1(gb);
+
+ if (m4ac->object_type == AOT_AAC_SCALABLE ||
+ m4ac->object_type == AOT_ER_AAC_SCALABLE)
+ skip_bits(gb, 3); // layerNr
+
+ memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
+ if (channel_config == 0) {
+ skip_bits(gb, 4); // element_instance_tag
+ if ((ret = decode_pce(avctx, m4ac, new_che_pos, gb)))
+ return ret;
+ } else {
+ if ((ret = set_default_channel_config(avctx, new_che_pos, channel_config)))
+ return ret;
+ }
+ if (ac && (ret = output_configure(ac, ac->che_pos, new_che_pos, channel_config, OC_GLOBAL_HDR)))
+ return ret;
+
+ if (extension_flag) {
+ switch (m4ac->object_type) {
+ case AOT_ER_BSAC:
+ skip_bits(gb, 5); // numOfSubFrame
+ skip_bits(gb, 11); // layer_length
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_LTP:
+ case AOT_ER_AAC_SCALABLE:
+ case AOT_ER_AAC_LD:
+ skip_bits(gb, 3); /* aacSectionDataResilienceFlag
+ * aacScalefactorDataResilienceFlag
+ * aacSpectralDataResilienceFlag
+ */
+ break;
+ }
+ skip_bits1(gb); // extensionFlag3 (TBD in version 3)
+ }
+ return 0;
+}
+
+/**
+ * Decode audio specific configuration; reference: table 1.13.
+ *
+ * Parses the common MPEG-4 audio header via ff_mpeg4audio_get_config(),
+ * then hands the remaining bits to the object-type specific parser
+ * (only the GA object types MAIN/LC/LTP are supported).
+ *
+ * @param ac pointer to AACContext, may be null
+ * @param avctx pointer to AVCCodecContext, used for logging
+ * @param m4ac pointer to MPEG4AudioConfig, used for parsing
+ * @param data pointer to AVCodecContext extradata
+ * @param data_size size of AVCCodecContext extradata
+ *
+ * @return Returns error status or number of consumed bits. <0 - error
+ */
+static int decode_audio_specific_config(AACContext *ac,
+ AVCodecContext *avctx,
+ MPEG4AudioConfig *m4ac,
+ const uint8_t *data, int data_size)
+{
+ GetBitContext gb;
+ int i;
+
+ av_dlog(avctx, "extradata size %d\n", avctx->extradata_size);
+ for (i = 0; i < avctx->extradata_size; i++)
+ av_dlog(avctx, "%02x ", avctx->extradata[i]);
+ av_dlog(avctx, "\n");
+
+ init_get_bits(&gb, data, data_size * 8);
+
+ if ((i = ff_mpeg4audio_get_config(m4ac, data, data_size)) < 0)
+ return -1;
+ if (m4ac->sampling_index > 12) {
+ av_log(avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", m4ac->sampling_index);
+ return -1;
+ }
+ if (m4ac->sbr == 1 && m4ac->ps == -1)
+ m4ac->ps = 1;
+
+ /* skip over the bits ff_mpeg4audio_get_config() already consumed */
+ skip_bits_long(&gb, i);
+
+ switch (m4ac->object_type) {
+ case AOT_AAC_MAIN:
+ case AOT_AAC_LC:
+ case AOT_AAC_LTP:
+ if (decode_ga_specific_config(ac, avctx, &gb, m4ac, m4ac->chan_config))
+ return -1;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Audio object type %s%d is not supported.\n",
+ m4ac->sbr == 1? "SBR+" : "", m4ac->object_type);
+ return -1;
+ }
+
+ av_dlog(avctx, "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
+ m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
+ m4ac->sample_rate, m4ac->sbr, m4ac->ps);
+
+ return get_bits_count(&gb);
+}
+
+/**
+ * linear congruential pseudorandom number generator
+ *
+ * Uses the well-known Numerical Recipes constants 1664525 / 1013904223;
+ * signed overflow wraps as intended on two's-complement targets.
+ *
+ * @param previous_val pointer to the current state of the generator
+ *
+ * @return Returns a 32-bit pseudorandom integer
+ */
+static av_always_inline int lcg_random(int previous_val)
+{
+ return previous_val * 1664525 + 1013904223;
+}
+
+/* Reset one frequency-domain predictor to its initial state:
+ * correlations and history zeroed, variances set to 1. */
+static av_always_inline void reset_predict_state(PredictorState *ps)
+{
+ ps->r0 = 0.0f;
+ ps->r1 = 0.0f;
+ ps->cor0 = 0.0f;
+ ps->cor1 = 0.0f;
+ ps->var0 = 1.0f;
+ ps->var1 = 1.0f;
+}
+
+/* Reset every predictor in the array (MAX_PREDICTORS entries). */
+static void reset_all_predictors(PredictorState *ps)
+{
+ int i;
+ for (i = 0; i < MAX_PREDICTORS; i++)
+ reset_predict_state(&ps[i]);
+}
+
+/* Reset every 30th predictor starting at index group_num - 1;
+ * group_num is the 1-based reset group read by decode_prediction(). */
+static void reset_predictor_group(PredictorState *ps, int group_num)
+{
+ int i;
+ for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
+ reset_predict_state(&ps[i]);
+}
+
+/* Build one static spectral-codebook VLC (8-bit primary lookup) from the
+ * ff_aac_spectral_* tables; 'num' selects the codebook, 'size' is the
+ * size of the static table backing it. */
+#define AAC_INIT_VLC_STATIC(num, size) \
+ INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
+ ff_aac_spectral_bits[num], sizeof( ff_aac_spectral_bits[num][0]), sizeof( ff_aac_spectral_bits[num][0]), \
+ ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), sizeof(ff_aac_spectral_codes[num][0]), \
+ size);
+
+/**
+ * Initialize the AAC decoder: parse extradata (if any), pick the output
+ * sample format, and set up the VLC tables, SBR, DSP/format-conversion
+ * helpers, MDCTs and window tables.
+ *
+ * @return 0 on success, -1 if the extradata could not be parsed.
+ */
+static av_cold int aac_decode_init(AVCodecContext *avctx)
+{
+ AACContext *ac = avctx->priv_data;
+
+ ac->avctx = avctx;
+ ac->m4ac.sample_rate = avctx->sample_rate;
+
+ if (avctx->extradata_size > 0) {
+ if (decode_audio_specific_config(ac, ac->avctx, &ac->m4ac,
+ avctx->extradata,
+ avctx->extradata_size) < 0)
+ return -1;
+ }
+
+ /* ffdshow custom code */
+#if CONFIG_AUDIO_FLOAT
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+#else
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+#endif
+
+ /* one static VLC per spectral codebook (sizes are the static table sizes) */
+ AAC_INIT_VLC_STATIC( 0, 304);
+ AAC_INIT_VLC_STATIC( 1, 270);
+ AAC_INIT_VLC_STATIC( 2, 550);
+ AAC_INIT_VLC_STATIC( 3, 300);
+ AAC_INIT_VLC_STATIC( 4, 328);
+ AAC_INIT_VLC_STATIC( 5, 294);
+ AAC_INIT_VLC_STATIC( 6, 306);
+ AAC_INIT_VLC_STATIC( 7, 268);
+ AAC_INIT_VLC_STATIC( 8, 510);
+ AAC_INIT_VLC_STATIC( 9, 366);
+ AAC_INIT_VLC_STATIC(10, 462);
+
+ ff_aac_sbr_init();
+
+ dsputil_init(&ac->dsp, avctx);
+ ff_fmt_convert_init(&ac->fmt_conv, avctx);
+
+ ac->random_state = 0x1f2e3d4c;
+
+ // -1024 - Compensate wrong IMDCT method.
+ // 60 - Required to scale values to the correct range [-32768,32767]
+ // for float to int16 conversion. (1 << (60 / 4)) == 32768
+ ac->sf_scale = 1. / -1024.;
+ ac->sf_offset = 60;
+
+ ff_aac_tableinit();
+
+ INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
+ ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]),
+ ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]),
+ 352);
+
+ /* 2048/256-sample inverse MDCTs plus a forward MDCT for LTP */
+ ff_mdct_init(&ac->mdct, 11, 1, 1.0);
+ ff_mdct_init(&ac->mdct_small, 8, 1, 1.0);
+ ff_mdct_init(&ac->mdct_ltp, 11, 0, 1.0);
+ // window initialization
+ ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
+ ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
+ ff_init_ff_sine_windows(10);
+ ff_init_ff_sine_windows( 7);
+
+ cbrt_tableinit();
+
+ return 0;
+}
+
+/**
+ * Skip data_stream_element; reference: table 4.10.
+ *
+ * @return 0 on success, -1 if the element runs past the buffer end.
+ */
+static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
+{
+ int byte_align = get_bits1(gb);
+ int count = get_bits(gb, 8);
+ if (count == 255) /* 255 is an escape: a second byte extends the count */
+ count += get_bits(gb, 8);
+ if (byte_align)
+ align_get_bits(gb);
+
+ if (get_bits_left(gb) < 8 * count) {
+ av_log(ac->avctx, AV_LOG_ERROR, overread_err);
+ return -1;
+ }
+ skip_bits_long(gb, 8 * count);
+ return 0;
+}
+
+/**
+ * Decode MAIN-profile prediction data for one ICS: an optional 5-bit
+ * predictor reset group (valid range 1..30) followed by one
+ * prediction_used flag per scalefactor band (capped by the
+ * sampling-rate dependent maximum).
+ *
+ * @return 0 on success, -1 on an invalid reset group.
+ */
+static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
+ GetBitContext *gb)
+{
+ int sfb;
+ if (get_bits1(gb)) {
+ ics->predictor_reset_group = get_bits(gb, 5);
+ if (ics->predictor_reset_group == 0 || ics->predictor_reset_group > 30) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Invalid Predictor Reset Group.\n");
+ return -1;
+ }
+ }
+ for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->m4ac.sampling_index]); sfb++) {
+ ics->prediction_used[sfb] = get_bits1(gb);
+ }
+ return 0;
+}
+
+/**
+ * Decode Long Term Prediction data; reference: table 4.xx.
+ *
+ * Reads the 11-bit lag, a 3-bit index into the ltp_coef table (the
+ * coefficient is pre-scaled by ac->sf_scale), and one 'used' flag per
+ * scalefactor band up to MAX_LTP_LONG_SFB.
+ */
+static void decode_ltp(AACContext *ac, LongTermPrediction *ltp,
+ GetBitContext *gb, uint8_t max_sfb)
+{
+ int sfb;
+
+ ltp->lag = get_bits(gb, 11);
+ ltp->coef = ltp_coef[get_bits(gb, 3)] * ac->sf_scale;
+ for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
+ ltp->used[sfb] = get_bits1(gb);
+}
+
+/**
+ * Decode Individual Channel Stream info; reference: table 4.6.
+ *
+ * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
+ * @return 0 - OK, -1 - error (ics is zeroed on any error path).
+ */
+static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
+ GetBitContext *gb, int common_window)
+{
+ if (get_bits1(gb)) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
+ memset(ics, 0, sizeof(IndividualChannelStream));
+ return -1;
+ }
+ ics->window_sequence[1] = ics->window_sequence[0];
+ ics->window_sequence[0] = get_bits(gb, 2);
+ ics->use_kb_window[1] = ics->use_kb_window[0];
+ ics->use_kb_window[0] = get_bits1(gb);
+ ics->num_window_groups = 1;
+ ics->group_len[0] = 1;
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ int i;
+ ics->max_sfb = get_bits(gb, 4);
+ /* 7 grouping bits: a set bit merges window i+1 into the current group */
+ for (i = 0; i < 7; i++) {
+ if (get_bits1(gb)) {
+ ics->group_len[ics->num_window_groups - 1]++;
+ } else {
+ ics->num_window_groups++;
+ ics->group_len[ics->num_window_groups - 1] = 1;
+ }
+ }
+ ics->num_windows = 8;
+ ics->swb_offset = ff_swb_offset_128[ac->m4ac.sampling_index];
+ ics->num_swb = ff_aac_num_swb_128[ac->m4ac.sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_128[ac->m4ac.sampling_index];
+ ics->predictor_present = 0;
+ } else {
+ ics->max_sfb = get_bits(gb, 6);
+ ics->num_windows = 1;
+ ics->swb_offset = ff_swb_offset_1024[ac->m4ac.sampling_index];
+ ics->num_swb = ff_aac_num_swb_1024[ac->m4ac.sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_1024[ac->m4ac.sampling_index];
+ ics->predictor_present = get_bits1(gb);
+ ics->predictor_reset_group = 0;
+ if (ics->predictor_present) {
+ if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+ if (decode_prediction(ac, ics, gb)) {
+ memset(ics, 0, sizeof(IndividualChannelStream));
+ return -1;
+ }
+ } else if (ac->m4ac.object_type == AOT_AAC_LC) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Prediction is not allowed in AAC-LC.\n");
+ memset(ics, 0, sizeof(IndividualChannelStream));
+ return -1;
+ } else {
+ /* remaining object types (e.g. LTP) carry LTP data here */
+ if ((ics->ltp.present = get_bits(gb, 1)))
+ decode_ltp(ac, &ics->ltp, gb, ics->max_sfb);
+ }
+ }
+ }
+
+ if (ics->max_sfb > ics->num_swb) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Number of scalefactor bands in group (%d) exceeds limit (%d).\n",
+ ics->max_sfb, ics->num_swb);
+ memset(ics, 0, sizeof(IndividualChannelStream));
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Decode band types (section_data payload); reference: table 4.46.
+ *
+ * @param band_type array of the used band type
+ * @param band_type_run_end array of the last scalefactor band of a band type run
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_band_types(AACContext *ac, enum BandType band_type[120],
+ int band_type_run_end[120], GetBitContext *gb,
+ IndividualChannelStream *ics)
+{
+ int g, idx = 0;
+ const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ int k = 0;
+ while (k < ics->max_sfb) {
+ uint8_t sect_end = k;
+ int sect_len_incr;
+ int sect_band_type = get_bits(gb, 4);
+ if (sect_band_type == 12) {
+ av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
+ return -1;
+ }
+ /* section length is escape-coded: an all-ones field means
+ * "add the maximum and read another field" */
+ while ((sect_len_incr = get_bits(gb, bits)) == (1 << bits) - 1)
+ sect_end += sect_len_incr;
+ sect_end += sect_len_incr;
+ if (get_bits_left(gb) < 0) {
+ av_log(ac->avctx, AV_LOG_ERROR, overread_err);
+ return -1;
+ }
+ if (sect_end > ics->max_sfb) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Number of bands (%d) exceeds limit (%d).\n",
+ sect_end, ics->max_sfb);
+ return -1;
+ }
+ for (; k < sect_end; k++) {
+ band_type [idx] = sect_band_type;
+ band_type_run_end[idx++] = sect_end;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode scalefactors; reference: table 4.47.
+ *
+ * @param global_gain first scalefactor value as scalefactors are differentially coded
+ * @param band_type array of the used band type
+ * @param band_type_run_end array of the last scalefactor band of a band type run
+ * @param sf array of scalefactors or intensity stereo positions
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_scalefactors(AACContext *ac, float sf[120], GetBitContext *gb,
+ unsigned int global_gain,
+ IndividualChannelStream *ics,
+ enum BandType band_type[120],
+ int band_type_run_end[120])
+{
+ const int sf_offset = ac->sf_offset + (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE ? 12 : 0);
+ int g, i, idx = 0;
+ /* running offsets: [0] regular gain, [1] noise gain, [2] intensity position */
+ int offset[3] = { global_gain, global_gain - 90, 100 };
+ int noise_flag = 1;
+ static const char *sf_str[3] = { "Global gain", "Noise gain", "Intensity stereo position" };
+ for (g = 0; g < ics->num_window_groups; g++) {
+ for (i = 0; i < ics->max_sfb;) {
+ int run_end = band_type_run_end[idx];
+ if (band_type[idx] == ZERO_BT) {
+ for (; i < run_end; i++, idx++)
+ sf[idx] = 0.;
+ } else if ((band_type[idx] == INTENSITY_BT) || (band_type[idx] == INTENSITY_BT2)) {
+ for (; i < run_end; i++, idx++) {
+ offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
+ /* int > 255U: the unsigned compare also rejects negatives */
+ if (offset[2] > 255U) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "%s (%d) out of range.\n", sf_str[2], offset[2]);
+ return -1;
+ }
+ sf[idx] = ff_aac_pow2sf_tab[-offset[2] + 300];
+ }
+ } else if (band_type[idx] == NOISE_BT) {
+ for (; i < run_end; i++, idx++) {
+ /* the first noise gain is coded as a raw 9-bit delta */
+ if (noise_flag-- > 0)
+ offset[1] += get_bits(gb, 9) - 256;
+ else
+ offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
+ if (offset[1] > 255U) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "%s (%d) out of range.\n", sf_str[1], offset[1]);
+ return -1;
+ }
+ sf[idx] = -ff_aac_pow2sf_tab[offset[1] + sf_offset + 100];
+ }
+ } else {
+ for (; i < run_end; i++, idx++) {
+ offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
+ if (offset[0] > 255U) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "%s (%d) out of range.\n", sf_str[0], offset[0]);
+ return -1;
+ }
+ sf[idx] = -ff_aac_pow2sf_tab[ offset[0] + sf_offset];
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode pulse data; reference: table 4.7.
+ *
+ * Positions are delta-coded: the first is an offset from the start of
+ * the chosen scalefactor band, each later one from its predecessor.
+ *
+ * @return 0 on success, -1 on an out-of-range band or position.
+ */
+static int decode_pulses(Pulse *pulse, GetBitContext *gb,
+ const uint16_t *swb_offset, int num_swb)
+{
+ int i, pulse_swb;
+ pulse->num_pulse = get_bits(gb, 2) + 1;
+ pulse_swb = get_bits(gb, 6);
+ if (pulse_swb >= num_swb)
+ return -1;
+ pulse->pos[0] = swb_offset[pulse_swb];
+ pulse->pos[0] += get_bits(gb, 5);
+ if (pulse->pos[0] > 1023)
+ return -1;
+ pulse->amp[0] = get_bits(gb, 4);
+ for (i = 1; i < pulse->num_pulse; i++) {
+ pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
+ if (pulse->pos[i] > 1023)
+ return -1;
+ pulse->amp[i] = get_bits(gb, 4);
+ }
+ return 0;
+}
+
+/**
+ * Decode Temporal Noise Shaping data; reference: table 4.48.
+ *
+ * Per window: a filter count, then per filter its length, order,
+ * direction and coefficients (dequantized through tns_tmp2_map).
+ * Field widths shrink for eight-short-sequence windows (is8).
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
+ GetBitContext *gb, const IndividualChannelStream *ics)
+{
+ int w, filt, i, coef_len, coef_res, coef_compress;
+ const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
+ const int tns_max_order = is8 ? 7 : ac->m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
+ for (w = 0; w < ics->num_windows; w++) {
+ if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
+ coef_res = get_bits1(gb);
+
+ for (filt = 0; filt < tns->n_filt[w]; filt++) {
+ int tmp2_idx;
+ tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
+
+ if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
+ av_log(ac->avctx, AV_LOG_ERROR, "TNS filter order %d is greater than maximum %d.\n",
+ tns->order[w][filt], tns_max_order);
+ tns->order[w][filt] = 0;
+ return -1;
+ }
+ if (tns->order[w][filt]) {
+ tns->direction[w][filt] = get_bits1(gb);
+ coef_compress = get_bits1(gb);
+ coef_len = coef_res + 3 - coef_compress;
+ tmp2_idx = 2 * coef_compress + coef_res;
+
+ for (i = 0; i < tns->order[w][filt]; i++)
+ tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode Mid/Side data; reference: table 4.54.
+ *
+ * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
+ * [1] mask is decoded from bitstream; [2] mask is all 1s;
+ * [3] reserved for scalable AAC
+ */
+static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
+ int ms_present)
+{
+ int idx;
+ if (ms_present == 1) {
+ for (idx = 0; idx < cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb; idx++)
+ cpe->ms_mask[idx] = get_bits1(gb);
+ } else if (ms_present == 2) {
+ memset(cpe->ms_mask, 1, cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb * sizeof(cpe->ms_mask[0]));
+ }
+ /* ms_present 0 and the reserved value 3 leave ms_mask untouched */
+}
+
+#ifndef VMUL2
+/* C fallback: dequantize two codebook values (two 4-bit fields packed
+ * in idx) and scale them; arch headers may provide a faster VMUL2. */
+static inline float *VMUL2(float *dst, const float *v, unsigned idx,
+ const float *scale)
+{
+ float s = *scale;
+ *dst++ = v[idx & 15] * s;
+ *dst++ = v[idx>>4 & 15] * s;
+ return dst;
+}
+#endif
+
+#ifndef VMUL4
+/* C fallback: dequantize four codebook values (four 2-bit fields packed
+ * in idx) and scale them; arch headers may provide a faster VMUL4. */
+static inline float *VMUL4(float *dst, const float *v, unsigned idx,
+ const float *scale)
+{
+ float s = *scale;
+ *dst++ = v[idx & 3] * s;
+ *dst++ = v[idx>>2 & 3] * s;
+ *dst++ = v[idx>>4 & 3] * s;
+ *dst++ = v[idx>>6 & 3] * s;
+ return dst;
+}
+#endif
+
+#ifndef VMUL2S
+/* Signed variant of VMUL2: the two low bits of 'sign' are XORed into
+ * the sign bit of the scale via float754 type punning (branchless). */
+static inline float *VMUL2S(float *dst, const float *v, unsigned idx,
+ unsigned sign, const float *scale)
+{
+ union float754 s0, s1;
+
+ s0.f = s1.f = *scale;
+ s0.i ^= sign >> 1 << 31;
+ s1.i ^= sign << 31;
+
+ *dst++ = v[idx & 15] * s0.f;
+ *dst++ = v[idx>>4 & 15] * s1.f;
+
+ return dst;
+}
+#endif
+
+#ifndef VMUL4S
+/* Signed variant of VMUL4: bits 12+ of idx mark which of the four
+ * values are non-zero and therefore consume a sign bit; each consumed
+ * sign is XORed into the scale's sign bit via float754 punning. */
+static inline float *VMUL4S(float *dst, const float *v, unsigned idx,
+ unsigned sign, const float *scale)
+{
+ unsigned nz = idx >> 12;
+ union float754 s = { .f = *scale };
+ union float754 t;
+
+ t.i = s.i ^ (sign & 1U<<31);
+ *dst++ = v[idx & 3] * t.f;
+
+ sign <<= nz & 1; nz >>= 1;
+ t.i = s.i ^ (sign & 1U<<31);
+ *dst++ = v[idx>>2 & 3] * t.f;
+
+ sign <<= nz & 1; nz >>= 1;
+ t.i = s.i ^ (sign & 1U<<31);
+ *dst++ = v[idx>>4 & 3] * t.f;
+
+ sign <<= nz & 1; nz >>= 1;
+ t.i = s.i ^ (sign & 1U<<31);
+ *dst++ = v[idx>>6 & 3] * t.f;
+
+ return dst;
+}
+#endif
+
+/**
+ * Decode spectral data; reference: table 4.50.
+ * Dequantize and scale spectral data; reference: 4.6.3.3.
+ *
+ * @param coef array of dequantized, scaled spectral data
+ * @param sf array of scalefactors or intensity stereo positions
+ * @param pulse_present set if pulses are present
+ * @param pulse pointer to pulse data struct
+ * @param band_type array of the used band type
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_spectrum_and_dequant(AACContext *ac, float coef[1024],
+                                       GetBitContext *gb, const float sf[120],
+                                       int pulse_present, const Pulse *pulse,
+                                       const IndividualChannelStream *ics,
+                                       enum BandType band_type[120])
+{
+    int i, k, g, idx = 0;
+    const int c = 1024 / ics->num_windows;   // coefficients per window
+    const uint16_t *offsets = ics->swb_offset;
+    float *coef_base = coef;                 // kept for the pulse pass below
+
+    /* Clear everything above the last coded scalefactor band in each window. */
+    for (g = 0; g < ics->num_windows; g++)
+        memset(coef + g * 128 + offsets[ics->max_sfb], 0, sizeof(float) * (c - offsets[ics->max_sfb]));
+
+    for (g = 0; g < ics->num_window_groups; g++) {
+        unsigned g_len = ics->group_len[g];
+
+        for (i = 0; i < ics->max_sfb; i++, idx++) {
+            /* Codebook type minus one; ZERO_BT (0) wraps to a huge unsigned
+             * value and is caught by the first branch below. */
+            const unsigned cbt_m1 = band_type[idx] - 1;
+            float *cfo = coef + offsets[i];
+            int off_len = offsets[i + 1] - offsets[i];
+            int group;
+
+            if (cbt_m1 >= INTENSITY_BT2 - 1) {
+                /* Zero and intensity bands carry no spectral data here. */
+                for (group = 0; group < g_len; group++, cfo+=128) {
+                    memset(cfo, 0, off_len * sizeof(float));
+                }
+            } else if (cbt_m1 == NOISE_BT - 1) {
+                /* PNS: fill the band with LCG noise, then scale so the band
+                 * energy matches the transmitted scalefactor. */
+                for (group = 0; group < g_len; group++, cfo+=128) {
+                    float scale;
+                    float band_energy;
+
+                    for (k = 0; k < off_len; k++) {
+                        ac->random_state = lcg_random(ac->random_state);
+                        cfo[k] = ac->random_state;
+                    }
+
+                    band_energy = ac->dsp.scalarproduct_float(cfo, cfo, off_len);
+                    scale = sf[idx] / sqrtf(band_energy);
+                    ac->dsp.vector_fmul_scalar(cfo, cfo, scale, off_len);
+                }
+            } else {
+                /* Huffman-coded spectral data; dispatch on the codebook
+                 * class: quads/pairs, with or without explicit sign bits,
+                 * and the escape codebooks in the default case. */
+                const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
+                const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
+                VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
+                OPEN_READER(re, gb);
+
+                switch (cbt_m1 >> 1) {
+                case 0:
+                    /* Quads, signs included in the codebook. */
+                    for (group = 0; group < g_len; group++, cfo+=128) {
+                        float *cf = cfo;
+                        int len = off_len;
+
+                        do {
+                            int code;
+                            unsigned cb_idx;
+
+                            UPDATE_CACHE(re, gb);
+                            GET_VLC(code, re, gb, vlc_tab, 8, 2);
+                            cb_idx = cb_vector_idx[code];
+                            cf = VMUL4(cf, vq, cb_idx, sf + idx);
+                        } while (len -= 4);
+                    }
+                    break;
+
+                case 1:
+                    /* Quads, explicit sign bits for the nonzero entries. */
+                    for (group = 0; group < g_len; group++, cfo+=128) {
+                        float *cf = cfo;
+                        int len = off_len;
+
+                        do {
+                            int code;
+                            unsigned nnz;
+                            unsigned cb_idx;
+                            uint32_t bits;
+
+                            UPDATE_CACHE(re, gb);
+                            GET_VLC(code, re, gb, vlc_tab, 8, 2);
+                            cb_idx = cb_vector_idx[code];
+                            /* nnz = number of nonzero coefficients, hence
+                             * the number of sign bits to read. */
+                            nnz = cb_idx >> 8 & 15;
+                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
+                            LAST_SKIP_BITS(re, gb, nnz);
+                            cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
+                        } while (len -= 4);
+                    }
+                    break;
+
+                case 2:
+                    /* Pairs, signs included in the codebook. */
+                    for (group = 0; group < g_len; group++, cfo+=128) {
+                        float *cf = cfo;
+                        int len = off_len;
+
+                        do {
+                            int code;
+                            unsigned cb_idx;
+
+                            UPDATE_CACHE(re, gb);
+                            GET_VLC(code, re, gb, vlc_tab, 8, 2);
+                            cb_idx = cb_vector_idx[code];
+                            cf = VMUL2(cf, vq, cb_idx, sf + idx);
+                        } while (len -= 2);
+                    }
+                    break;
+
+                case 3:
+                case 4:
+                    /* Pairs with explicit sign bits. */
+                    for (group = 0; group < g_len; group++, cfo+=128) {
+                        float *cf = cfo;
+                        int len = off_len;
+
+                        do {
+                            int code;
+                            unsigned nnz;
+                            unsigned cb_idx;
+                            unsigned sign;
+
+                            UPDATE_CACHE(re, gb);
+                            GET_VLC(code, re, gb, vlc_tab, 8, 2);
+                            cb_idx = cb_vector_idx[code];
+                            nnz = cb_idx >> 8 & 15;
+                            sign = SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12);
+                            LAST_SKIP_BITS(re, gb, nnz);
+                            cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
+                        } while (len -= 2);
+                    }
+                    break;
+
+                default:
+                    /* Escape codebooks: values may exceed the codebook range
+                     * and are extended by an escape_sequence. The output is
+                     * assembled as raw float bit patterns through icf. */
+                    for (group = 0; group < g_len; group++, cfo+=128) {
+                        float *cf = cfo;
+                        uint32_t *icf = (uint32_t *) cf;
+                        int len = off_len;
+
+                        do {
+                            int code;
+                            unsigned nzt, nnz;
+                            unsigned cb_idx;
+                            uint32_t bits;
+                            int j;
+
+                            UPDATE_CACHE(re, gb);
+                            GET_VLC(code, re, gb, vlc_tab, 8, 2);
+
+                            if (!code) {
+                                *icf++ = 0;
+                                *icf++ = 0;
+                                continue;
+                            }
+
+                            cb_idx = cb_vector_idx[code];
+                            nnz = cb_idx >> 12;   // number of sign bits
+                            nzt = cb_idx >> 8;    // per-coefficient escape flags
+                            bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
+                            LAST_SKIP_BITS(re, gb, nnz);
+
+                            for (j = 0; j < 2; j++) {
+                                if (nzt & 1<<j) {
+                                    uint32_t b;
+                                    int n;
+                                    /* The total length of escape_sequence must be < 22 bits according
+                                       to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
+                                    UPDATE_CACHE(re, gb);
+                                    b = GET_CACHE(re, gb);
+                                    b = 31 - av_log2(~b);   // count of leading 1s
+
+                                    if (b > 8) {
+                                        av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
+                                        return -1;
+                                    }
+
+                                    SKIP_BITS(re, gb, b + 1);
+                                    b += 4;
+                                    n = (1 << b) + SHOW_UBITS(re, gb, b);
+                                    LAST_SKIP_BITS(re, gb, b);
+                                    /* Dequantized magnitude from the cube-root
+                                     * table, sign from the top of 'bits'. */
+                                    *icf++ = cbrt_tab[n] | (bits & 1U<<31);
+                                    bits <<= 1;
+                                } else {
+                                    unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
+                                    *icf++ = (bits & 1U<<31) | v;
+                                    /* Only nonzero values consume a sign bit. */
+                                    bits <<= !!v;
+                                }
+                                cb_idx >>= 4;
+                            }
+                        } while (len -= 2);
+
+                        /* Escape bands are scaled in one pass at the end. */
+                        ac->dsp.vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
+                    }
+                }
+
+                CLOSE_READER(re, gb);
+            }
+        }
+        coef += g_len << 7;   // advance by group_len windows of 128
+    }
+
+    if (pulse_present) {
+        /* Pulses are added in the quantized domain: inverse-quantize the
+         * coefficient (|x|^(3/4) here, since it was raised to 4/3), add the
+         * pulse amplitude, then requantize with cbrtf(...)*ico == x^(4/3). */
+        idx = 0;
+        for (i = 0; i < pulse->num_pulse; i++) {
+            float co = coef_base[ pulse->pos[i] ];
+            while (offsets[idx + 1] <= pulse->pos[i])
+                idx++;
+            if (band_type[idx] != NOISE_BT && sf[idx]) {
+                float ico = -pulse->amp[i];
+                if (co) {
+                    co /= sf[idx];
+                    ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
+                }
+                coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
+            }
+        }
+    }
+    return 0;
+}
+
+/**
+ * Reduce a float to a 16-bit-significant bit pattern, rounding the
+ * discarded low half upward (add-half-then-truncate).
+ */
+static av_always_inline float flt16_round(float pf)
+{
+    union float754 bits;
+
+    bits.f  = pf;
+    bits.i += 0x00008000U;
+    bits.i &= 0xFFFF0000U;
+    return bits.f;
+}
+
+/**
+ * Reduce a float to a 16-bit-significant bit pattern using
+ * round-half-to-even on the discarded low 16 bits.
+ *
+ * Fix: "tmp.i & 0x00010000U >> 16" parsed as "tmp.i & (0x00010000U >> 16)"
+ * because >> binds tighter than & in C, i.e. it tested bit 0 of tmp.i
+ * rather than bit 16. In the exact tie case (low half == 0x8000) bit 0 is
+ * always clear, so ties always rounded down instead of toward the even
+ * retained value. Parenthesize to extract bit 16 (the LSB of the retained
+ * half) as intended.
+ */
+static av_always_inline float flt16_even(float pf)
+{
+    union float754 tmp;
+    tmp.f = pf;
+    tmp.i = (tmp.i + 0x00007FFFU + ((tmp.i & 0x00010000U) >> 16)) & 0xFFFF0000U;
+    return tmp.f;
+}
+
+/** Truncate a float's bit pattern to its top 16 bits (chop, no rounding). */
+static av_always_inline float flt16_trunc(float pf)
+{
+    union float754 v;
+
+    v.f = pf;
+    v.i = v.i & 0xFFFF0000U;
+    return v.f;
+}
+
+/**
+ * Two-stage lattice predictor for one AAC-Main spectral coefficient.
+ * All state updates go through the flt16_* helpers, which emulate the
+ * reduced-precision arithmetic the format requires for bit-exact
+ * prediction across implementations.
+ */
+static av_always_inline void predict(PredictorState *ps, float *coef,
+                                     float sf_scale, float inv_sf_scale,
+                                     int output_enable)
+{
+    const float a     = 0.953125; // 61.0 / 64
+    const float alpha = 0.90625;  // 29.0 / 32
+    float e0, e1;
+    float pv;
+    float k1, k2;
+    float r0 = ps->r0, r1 = ps->r1;
+    float cor0 = ps->cor0, cor1 = ps->cor1;
+    float var0 = ps->var0, var1 = ps->var1;
+
+    /* Prediction coefficients from the adapted correlation/variance pairs;
+     * variances <= 1 disable the corresponding stage. */
+    k1 = var0 > 1 ? cor0 * flt16_even(a / var0) : 0;
+    k2 = var1 > 1 ? cor1 * flt16_even(a / var1) : 0;
+
+    /* Predicted value; only folded into the output when prediction is
+     * enabled for this coefficient's scalefactor band. The state below is
+     * updated either way so the predictor keeps tracking. */
+    pv = flt16_round(k1 * r0 + k2 * r1);
+    if (output_enable)
+        *coef += pv * sf_scale;
+
+    /* Prediction errors of the two lattice stages. */
+    e0 = *coef * inv_sf_scale;
+    e1 = e0 - k1 * r0;
+
+    /* Leaky adaptation of correlations and variances (factor alpha). */
+    ps->cor1 = flt16_trunc(alpha * cor1 + r1 * e1);
+    ps->var1 = flt16_trunc(alpha * var1 + 0.5f * (r1 * r1 + e1 * e1));
+    ps->cor0 = flt16_trunc(alpha * cor0 + r0 * e0);
+    ps->var0 = flt16_trunc(alpha * var0 + 0.5f * (r0 * r0 + e0 * e0));
+
+    /* Shift the delay line, attenuated by a. */
+    ps->r1 = flt16_trunc(a * (r0 - k1 * e0));
+    ps->r0 = flt16_trunc(a * e0);
+}
+
+/**
+ * Apply AAC-Main style frequency domain prediction.
+ */
+static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
+{
+    const float scale     = ac->sf_scale;
+    const float inv_scale = 1 / ac->sf_scale;
+    int band, bin;
+
+    /* First use of the predictor for this channel: start from clean state. */
+    if (!sce->ics.predictor_initialized) {
+        reset_all_predictors(sce->predictor_state);
+        sce->ics.predictor_initialized = 1;
+    }
+
+    if (sce->ics.window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        /* Prediction is not run with short windows; flush all state. */
+        reset_all_predictors(sce->predictor_state);
+        return;
+    }
+
+    /* Run the per-coefficient predictor over every predictable band; the
+     * output is only applied where prediction_used says so, but the state
+     * must be updated for all of them. */
+    for (band = 0; band < ff_aac_pred_sfb_max[ac->m4ac.sampling_index]; band++) {
+        for (bin = sce->ics.swb_offset[band]; bin < sce->ics.swb_offset[band + 1]; bin++) {
+            predict(&sce->predictor_state[bin], &sce->coeffs[bin],
+                    scale, inv_scale,
+                    sce->ics.predictor_present && sce->ics.prediction_used[band]);
+        }
+    }
+    if (sce->ics.predictor_reset_group)
+        reset_predictor_group(sce->predictor_state, sce->ics.predictor_reset_group);
+}
+
+/**
+ * Decode an individual_channel_stream payload; reference: table 4.44.
+ *
+ * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
+ * @param scale_flag scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_ics(AACContext *ac, SingleChannelElement *sce,
+                      GetBitContext *gb, int common_window, int scale_flag)
+{
+    Pulse pulse;
+    TemporalNoiseShaping *tns = &sce->tns;
+    IndividualChannelStream *ics = &sce->ics;
+    float *out = sce->coeffs;
+    int global_gain, pulse_present = 0;
+
+    /* This assignment is to silence a GCC warning about the variable being used
+     * uninitialized when in fact it always is.
+     */
+    pulse.num_pulse = 0;
+
+    global_gain = get_bits(gb, 8);
+
+    /* With a common window, ics_info was already parsed in decode_cpe(). */
+    if (!common_window && !scale_flag) {
+        if (decode_ics_info(ac, ics, gb, 0) < 0)
+            return -1;
+    }
+
+    if (decode_band_types(ac, sce->band_type, sce->band_type_run_end, gb, ics) < 0)
+        return -1;
+    if (decode_scalefactors(ac, sce->sf, gb, global_gain, ics, sce->band_type, sce->band_type_run_end) < 0)
+        return -1;
+
+    pulse_present = 0;
+    if (!scale_flag) {
+        /* Optional tools: pulse, TNS, gain control (SSR, unsupported). */
+        if ((pulse_present = get_bits1(gb))) {
+            if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+                av_log(ac->avctx, AV_LOG_ERROR, "Pulse tool not allowed in eight short sequence.\n");
+                return -1;
+            }
+            if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
+                av_log(ac->avctx, AV_LOG_ERROR, "Pulse data corrupt or invalid.\n");
+                return -1;
+            }
+        }
+        if ((tns->present = get_bits1(gb)) && decode_tns(ac, tns, gb, ics))
+            return -1;
+        if (get_bits1(gb)) {
+            av_log_missing_feature(ac->avctx, "SSR", 1);
+            return -1;
+        }
+    }
+
+    if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present, &pulse, ics, sce->band_type) < 0)
+        return -1;
+
+    /* For common windows, prediction is applied in decode_cpe() instead. */
+    if (ac->m4ac.object_type == AOT_AAC_MAIN && !common_window)
+        apply_prediction(ac, sce);
+
+    return 0;
+}
+
+/**
+ * Mid/Side stereo decoding; reference: 4.6.8.1.3.
+ */
+static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
+{
+    const IndividualChannelStream *ics = &cpe->ch[0].ics;
+    const uint16_t *offsets = ics->swb_offset;
+    float *left  = cpe->ch[0].coeffs;
+    float *right = cpe->ch[1].coeffs;
+    int g, sfb, w, idx = 0;
+
+    for (g = 0; g < ics->num_window_groups; g++) {
+        for (sfb = 0; sfb < ics->max_sfb; sfb++, idx++) {
+            /* M/S only applies to normal spectral bands, never to noise
+             * (PNS) or intensity bands. */
+            if (!cpe->ms_mask[idx])
+                continue;
+            if (cpe->ch[0].band_type[idx] >= NOISE_BT ||
+                cpe->ch[1].band_type[idx] >= NOISE_BT)
+                continue;
+            for (w = 0; w < ics->group_len[g]; w++) {
+                /* In-place sum/difference butterfly over the band. */
+                ac->dsp.butterflies_float(left  + w * 128 + offsets[sfb],
+                                          right + w * 128 + offsets[sfb],
+                                          offsets[sfb + 1] - offsets[sfb]);
+            }
+        }
+        left  += ics->group_len[g] * 128;
+        right += ics->group_len[g] * 128;
+    }
+}
+
+/**
+ * intensity stereo decoding; reference: 4.6.8.2.3
+ *
+ * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
+ *                   [1] mask is decoded from bitstream; [2] mask is all 1s;
+ *                   [3] reserved for scalable AAC
+ */
+static void apply_intensity_stereo(AACContext *ac, ChannelElement *cpe, int ms_present)
+{
+    const IndividualChannelStream *ics = &cpe->ch[1].ics;
+    SingleChannelElement *sce1 = &cpe->ch[1];
+    float *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
+    const uint16_t *offsets = ics->swb_offset;
+    int g, group, i, idx = 0;
+    int c;
+    float scale;
+    for (g = 0; g < ics->num_window_groups; g++) {
+        for (i = 0; i < ics->max_sfb;) {
+            if (sce1->band_type[idx] == INTENSITY_BT || sce1->band_type[idx] == INTENSITY_BT2) {
+                /* Handle the whole run of consecutive intensity bands. */
+                const int bt_run_end = sce1->band_type_run_end[idx];
+                for (; i < bt_run_end; i++, idx++) {
+                    /* Map band type to a +/-1 phase factor (presumably
+                     * 14 == INTENSITY_BT2 — verify against aac.h). */
+                    c = -1 + 2 * (sce1->band_type[idx] - 14);
+                    /* An M/S flag on an intensity band flips the phase. */
+                    if (ms_present)
+                        c *= 1 - 2 * cpe->ms_mask[idx];
+                    scale = c * sce1->sf[idx];
+                    /* Right channel = scaled copy of the left channel. */
+                    for (group = 0; group < ics->group_len[g]; group++)
+                        ac->dsp.vector_fmul_scalar(coef1 + group * 128 + offsets[i],
+                                                   coef0 + group * 128 + offsets[i],
+                                                   scale,
+                                                   offsets[i + 1] - offsets[i]);
+                }
+            } else {
+                /* Skip over a run of non-intensity bands. */
+                int bt_run_end = sce1->band_type_run_end[idx];
+                idx += bt_run_end - i;
+                i = bt_run_end;
+            }
+        }
+        coef0 += ics->group_len[g] * 128;
+        coef1 += ics->group_len[g] * 128;
+    }
+}
+
+/**
+ * Decode a channel_pair_element; reference: table 4.4.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
+{
+    int i, ret, common_window, ms_present = 0;
+
+    common_window = get_bits1(gb);
+    if (common_window) {
+        if (decode_ics_info(ac, &cpe->ch[0].ics, gb, 1))
+            return -1;
+        /* Share the ics_info between both channels, but preserve the
+         * second channel's previous-frame window shape for its overlap. */
+        i = cpe->ch[1].ics.use_kb_window[0];
+        cpe->ch[1].ics = cpe->ch[0].ics;
+        cpe->ch[1].ics.use_kb_window[1] = i;
+        /* Outside AAC-Main, the predictor_present bit signals optional LTP
+         * data for the second channel. */
+        if (cpe->ch[1].ics.predictor_present && (ac->m4ac.object_type != AOT_AAC_MAIN))
+            if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
+                decode_ltp(ac, &cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
+        ms_present = get_bits(gb, 2);
+        if (ms_present == 3) {
+            av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
+            return -1;
+        } else if (ms_present)
+            decode_mid_side_stereo(cpe, gb, ms_present);
+    }
+    if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
+        return ret;
+    if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
+        return ret;
+
+    if (common_window) {
+        if (ms_present)
+            apply_mid_side_stereo(ac, cpe);
+        /* With a common window, prediction is applied here rather than in
+         * decode_ics() (which skips it when common_window is set). */
+        if (ac->m4ac.object_type == AOT_AAC_MAIN) {
+            apply_prediction(ac, &cpe->ch[0]);
+            apply_prediction(ac, &cpe->ch[1]);
+        }
+    }
+
+    apply_intensity_stereo(ac, cpe, ms_present);
+    return 0;
+}
+
+/* Coupling gain scale bases, indexed by the 2-bit field read in
+ * decode_cce(); per-band gains are (negative) powers of these. */
+static const float cce_scale[] = {
+    1.09050773266525765921, //2^(1/8)
+    1.18920711500272106672, //2^(1/4)
+    M_SQRT2,                //2^(1/2)
+    2,                      //2^1
+};
+
+/**
+ * Decode coupling_channel_element; reference: table 4.8.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
+{
+    int num_gain = 0;
+    int c, g, sfb, ret;
+    int sign;
+    float scale;
+    SingleChannelElement *sce = &che->ch[0];
+    ChannelCoupling *coup = &che->coup;
+
+    /* 0 = before TNS; 2 = candidate for after IMDCT (refined below). */
+    coup->coupling_point = 2 * get_bits1(gb);
+    coup->num_coupled = get_bits(gb, 3);
+    /* One gain list per coupled target; a CPE coupled on both channels
+     * (ch_select == 3) needs a second list. */
+    for (c = 0; c <= coup->num_coupled; c++) {
+        num_gain++;
+        coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE;
+        coup->id_select[c] = get_bits(gb, 4);
+        if (coup->type[c] == TYPE_CPE) {
+            coup->ch_select[c] = get_bits(gb, 2);
+            if (coup->ch_select[c] == 3)
+                num_gain++;
+        } else
+            coup->ch_select[c] = 2;
+    }
+    /* Resolve to BEFORE_TNS / BETWEEN_TNS_AND_IMDCT / AFTER_IMDCT. */
+    coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);
+
+    sign = get_bits(gb, 1);
+    scale = cce_scale[get_bits(gb, 2)];
+
+    if ((ret = decode_ics(ac, sce, gb, 0, 0)))
+        return ret;
+
+    for (c = 0; c < num_gain; c++) {
+        int idx = 0;
+        int cge = 1;
+        int gain = 0;
+        float gain_cache = 1.;
+        if (c) {
+            /* common_gain_element_present; after-IMDCT coupling always
+             * uses a single common gain. */
+            cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
+            gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
+            gain_cache = powf(scale, -gain);
+        }
+        if (coup->coupling_point == AFTER_IMDCT) {
+            coup->gain[c][0] = gain_cache;
+        } else {
+            for (g = 0; g < sce->ics.num_window_groups; g++) {
+                for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
+                    if (sce->band_type[idx] != ZERO_BT) {
+                        if (!cge) {
+                            /* Per-band differential gain, accumulated. */
+                            int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
+                            if (t) {
+                                int s = 1;
+                                t = gain += t;
+                                if (sign) {
+                                    /* LSB of the gain carries the sign. */
+                                    s -= 2 * (t & 0x1);
+                                    t >>= 1;
+                                }
+                                gain_cache = powf(scale, -t) * s;
+                            }
+                        }
+                        coup->gain[c][idx] = gain_cache;
+                    }
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+/**
+ * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
+ *
+ * Exclusion flags arrive in groups of 7 bits; each group is followed by a
+ * continuation bit, except when the mask is already full.
+ *
+ * @return Returns number of bytes consumed.
+ */
+static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
+                                         GetBitContext *gb)
+{
+    int count = 0;
+    int bit;
+
+    for (;;) {
+        for (bit = 0; bit < 7; bit++)
+            che_drc->exclude_mask[count++] = get_bits1(gb);
+        /* No continuation bit is read once the mask cannot take another
+         * full group (same short-circuit as the original loop condition). */
+        if (count >= MAX_CHANNELS - 7 || !get_bits1(gb))
+            break;
+    }
+
+    /* One byte per group of 7 flags plus its continuation bit. */
+    return count / 7;
+}
+
+/**
+ * Decode dynamic range information; reference: table 4.52.
+ *
+ * @param cnt length of TYPE_FIL syntactic element in bytes
+ *
+ * @return Returns number of bytes consumed.
+ */
+static int decode_dynamic_range(DynamicRangeControl *che_drc,
+                                GetBitContext *gb, int cnt)
+{
+    int n = 1;               // running count of bytes consumed
+    int drc_num_bands = 1;
+    int i;
+
+    /* pce_tag_present? */
+    if (get_bits1(gb)) {
+        che_drc->pce_instance_tag = get_bits(gb, 4);
+        skip_bits(gb, 4); // tag_reserved_bits
+        n++;
+    }
+
+    /* excluded_chns_present? */
+    if (get_bits1(gb)) {
+        n += decode_drc_channel_exclusions(che_drc, gb);
+    }
+
+    /* drc_bands_present? */
+    if (get_bits1(gb)) {
+        che_drc->band_incr = get_bits(gb, 4);
+        che_drc->interpolation_scheme = get_bits(gb, 4);
+        n++;
+        drc_num_bands += che_drc->band_incr;
+        /* Upper boundary of each DRC band, one byte each. */
+        for (i = 0; i < drc_num_bands; i++) {
+            che_drc->band_top[i] = get_bits(gb, 8);
+            n++;
+        }
+    }
+
+    /* prog_ref_level_present? */
+    if (get_bits1(gb)) {
+        che_drc->prog_ref_level = get_bits(gb, 7);
+        skip_bits1(gb); // prog_ref_level_reserved_bits
+        n++;
+    }
+
+    /* One sign bit and 7-bit control value per DRC band. */
+    for (i = 0; i < drc_num_bands; i++) {
+        che_drc->dyn_rng_sgn[i] = get_bits1(gb);
+        che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
+        n++;
+    }
+
+    return n;
+}
+
+/**
+ * Decode extension data (incomplete); reference: table 4.51.
+ *
+ * @param cnt length of TYPE_FIL syntactic element in bytes
+ *
+ * @return Returns number of bytes consumed
+ */
+static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
+                                    ChannelElement *che, enum RawDataBlockType elem_type)
+{
+    int crc_flag = 0;
+    int res = cnt;
+    switch (get_bits(gb, 4)) { // extension type
+    case EXT_SBR_DATA_CRC:
+        crc_flag++;
+        /* fall through: the CRC variant shares the SBR path */
+    case EXT_SBR_DATA:
+        if (!che) {
+            /* SBR must be attached to the preceding channel element. */
+            av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
+            return res;
+        } else if (!ac->m4ac.sbr) {
+            av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
+            skip_bits_long(gb, 8 * cnt - 4);
+            return res;
+        } else if (ac->m4ac.sbr == -1 && ac->output_configured == OC_LOCKED) {
+            av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
+            skip_bits_long(gb, 8 * cnt - 4);
+            return res;
+        } else if (ac->m4ac.ps == -1 && ac->output_configured < OC_LOCKED && ac->avctx->channels == 1) {
+            /* Implicit SBR+PS on a mono stream: reconfigure the output. */
+            ac->m4ac.sbr = 1;
+            ac->m4ac.ps = 1;
+            output_configure(ac, ac->che_pos, ac->che_pos, ac->m4ac.chan_config, ac->output_configured);
+        } else {
+            ac->m4ac.sbr = 1;
+        }
+        res = ff_decode_sbr_extension(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
+        break;
+    case EXT_DYNAMIC_RANGE:
+        res = decode_dynamic_range(&ac->che_drc, gb, cnt);
+        break;
+    case EXT_FILL:
+    case EXT_FILL_DATA:
+    case EXT_DATA_ELEMENT:
+    default:
+        /* Unhandled extension types: skip the remaining payload bits
+         * (cnt bytes minus the 4-bit type already read). */
+        skip_bits_long(gb, 8 * cnt - 4);
+        break;
+    };
+    return res;
+}
+
+/**
+ * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3.
+ *
+ * @param decode 1 if tool is used normally, 0 if tool is used in LTP.
+ * @param coef spectral coefficients
+ */
+static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
+                      IndividualChannelStream *ics, int decode)
+{
+    const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
+    int w, filt, m, i;
+    int bottom, top, order, start, end, size, inc;
+    float lpc[TNS_MAX_ORDER];
+    /* Fix: the MA branch reads and writes tmp[order] (the shift loop runs
+     * down to tmp[order] = tmp[order - 1]), so with order == TNS_MAX_ORDER
+     * a buffer of TNS_MAX_ORDER elements overflows by one. Size it +1. */
+    float tmp[TNS_MAX_ORDER + 1];
+
+    for (w = 0; w < ics->num_windows; w++) {
+        bottom = ics->num_swb;
+        for (filt = 0; filt < tns->n_filt[w]; filt++) {
+            /* Each filter covers the band range [bottom, top), counted
+             * downward from the previous filter's bottom. */
+            top    = bottom;
+            bottom = FFMAX(0, top - tns->length[w][filt]);
+            order  = tns->order[w][filt];
+            if (order == 0)
+                continue;
+
+            // tns_decode_coef
+            compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0);
+
+            start = ics->swb_offset[FFMIN(bottom, mmm)];
+            end   = ics->swb_offset[FFMIN(   top, mmm)];
+            if ((size = end - start) <= 0)
+                continue;
+            /* Filter direction flag: run downward in frequency when set. */
+            if (tns->direction[w][filt]) {
+                inc = -1;
+                start = end - 1;
+            } else {
+                inc = 1;
+            }
+            start += w * 128;   // offset of this window's coefficients
+
+            if (decode) {
+                // ar filter
+                for (m = 0; m < size; m++, start += inc)
+                    for (i = 1; i <= FFMIN(m, order); i++)
+                        coef[start] -= coef[start - i * inc] * lpc[i - 1];
+            } else {
+                // ma filter
+                for (m = 0; m < size; m++, start += inc) {
+                    tmp[0] = coef[start];
+                    for (i = 1; i <= FFMIN(m, order); i++)
+                        coef[start] += tmp[i] * lpc[i - 1];
+                    /* Shift the delay line. */
+                    for (i = order; i > 0; i--)
+                        tmp[i] = tmp[i - 1];
+                }
+            }
+        }
+    }
+}
+
+/**
+ * Apply windowing and MDCT to obtain the spectral
+ * coefficient from the predicted sample by LTP.
+ */
+static void windowing_and_mdct_ltp(AACContext *ac, float *out,
+                                   float *in, IndividualChannelStream *ics)
+{
+    const float *lwindow      = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+    const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
+
+    /* First half: window with the previous frame's window shape. */
+    if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
+        ac->dsp.vector_fmul(in, in, lwindow_prev, 1024);
+    } else {
+        memset(in, 0, 448 * sizeof(float));
+        ac->dsp.vector_fmul(in + 448, in + 448, swindow_prev, 128);
+        /* in[576..1023] pass through unchanged. The original code had a
+         * memcpy(in + 576, in + 576, ...) here — a no-op that is also
+         * undefined behavior (memcpy with overlapping/identical source and
+         * destination); removed. */
+    }
+    /* Second half: window with the current frame's window shape. */
+    if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
+        ac->dsp.vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
+    } else {
+        /* in[1024..1471] pass through unchanged (self-memcpy removed here
+         * as well, for the same reason as above). */
+        ac->dsp.vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
+        memset(in + 1024 + 576, 0, 448 * sizeof(float));
+    }
+    ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
+}
+
+/**
+ * Apply the long term prediction
+ */
+static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
+{
+    const LongTermPrediction *ltp = &sce->ics.ltp;
+    const uint16_t *offsets = sce->ics.swb_offset;
+    int i, sfb;
+
+    /* LTP is only defined for long windows. */
+    if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+        float *predTime = sce->ret;
+        float *predFreq = ac->buf_mdct;
+        int16_t num_samples = 2048;
+
+        if (ltp->lag < 1024)
+            num_samples = ltp->lag + 1024;
+        /* Build the time-domain prediction from the lagged history,
+         * scaled by the transmitted LTP coefficient; zero the rest. */
+        for (i = 0; i < num_samples; i++)
+            predTime[i] = sce->ltp_state[i + 2048 - ltp->lag] * ltp->coef;
+        memset(&predTime[i], 0, (2048 - i) * sizeof(float));
+
+        /* Transform the prediction to the spectral domain. */
+        windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
+
+        /* Run TNS as an MA filter (decode == 0) on the prediction so it
+         * matches the shaping of the coded spectrum. */
+        if (sce->tns.present)
+            apply_tns(predFreq, &sce->tns, &sce->ics, 0);
+
+        /* Add the prediction only in the bands where LTP is enabled. */
+        for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
+            if (ltp->used[sfb])
+                for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
+                    sce->coeffs[i] += predFreq[i];
+    }
+}
+
+/**
+ * Update the LTP buffer for next frame
+ */
+static void update_ltp(AACContext *ac, SingleChannelElement *sce)
+{
+    IndividualChannelStream *ics = &sce->ics;
+    float *saved = sce->saved;
+    float *saved_ltp = sce->coeffs;   // reused as scratch for the windowed lookahead
+    const float *lwindow = ics->use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+    int i;
+
+    /* Produce the windowed second half of this frame's IMDCT output,
+     * shaped according to the current window sequence. */
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        memcpy(saved_ltp, saved, 512 * sizeof(float));
+        memset(saved_ltp + 576, 0, 448 * sizeof(float));
+        ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+        for (i = 0; i < 64; i++)
+            saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
+    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+        memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(float));
+        memset(saved_ltp + 576, 0, 448 * sizeof(float));
+        ac->dsp.vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+        for (i = 0; i < 64; i++)
+            saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * swindow[63 - i];
+    } else { // LONG_STOP or ONLY_LONG
+        ac->dsp.vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
+        for (i = 0; i < 512; i++)
+            saved_ltp[i + 512] = ac->buf_mdct[1023 - i] * lwindow[511 - i];
+    }
+
+    /* Shift the history down one frame and append this frame's time output
+     * plus the windowed lookahead; ltp_state is int16_t in this build. */
+    memcpy(sce->ltp_state, &sce->ltp_state[1024], 1024 * sizeof(int16_t));
+    ac->fmt_conv.float_to_int16(&(sce->ltp_state[1024]), sce->ret, 1024);
+    ac->fmt_conv.float_to_int16(&(sce->ltp_state[2048]), saved_ltp, 1024);
+}
+
+/**
+ * Conduct IMDCT and windowing.
+ */
+static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
+{
+    IndividualChannelStream *ics = &sce->ics;
+    float *in = sce->coeffs;
+    float *out = sce->ret;
+    float *saved = sce->saved;
+    const float *swindow      = ics->use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+    const float *lwindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+    const float *swindow_prev = ics->use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
+    float *buf = ac->buf_mdct;
+    float *temp = ac->temp;
+    int i;
+
+    // imdct: eight 128-point transforms for short windows, one 1024-point
+    // transform otherwise.
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        for (i = 0; i < 1024; i += 128)
+            ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
+    } else
+        ac->mdct.imdct_half(&ac->mdct, buf, in);
+
+    /* window overlapping
+     * NOTE: To simplify the overlapping code, all 'meaningless' short to long
+     * and long to short transitions are considered to be short to short
+     * transitions. This leaves just two cases (long to long and short to short)
+     * with a little special sauce for EIGHT_SHORT_SEQUENCE.
+     */
+    if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
+            (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
+        /* long-to-long: single 512-sample overlap-add. */
+        ac->dsp.vector_fmul_window( out, saved, buf, lwindow_prev, 512);
+    } else {
+        /* The first 448 output samples come straight from the saved
+         * overlap; short windows only affect the 448..576 region. */
+        memcpy( out, saved, 448 * sizeof(float));
+
+        if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+            /* Overlap-add the eight short windows in turn; the last one
+             * spills into temp and is copied back below. */
+            ac->dsp.vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
+            ac->dsp.vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
+            ac->dsp.vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
+            memcpy( out + 448 + 4*128, temp, 64 * sizeof(float));
+        } else {
+            ac->dsp.vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
+            memcpy( out + 576, buf + 64, 448 * sizeof(float));
+        }
+    }
+
+    // buffer update: save the second half for next frame's overlap.
+    if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+        memcpy( saved, temp + 64, 64 * sizeof(float));
+        ac->dsp.vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
+        ac->dsp.vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+        ac->dsp.vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+        memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
+    } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+        memcpy( saved, buf + 512, 448 * sizeof(float));
+        memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(float));
+    } else { // LONG_STOP or ONLY_LONG
+        memcpy( saved, buf + 512, 512 * sizeof(float));
+    }
+}
+
+/**
+ * Apply dependent channel coupling (applied before IMDCT).
+ *
+ * @param index index into coupling gain array
+ */
+static void apply_dependent_coupling(AACContext *ac,
+                                     SingleChannelElement *target,
+                                     ChannelElement *cce, int index)
+{
+    IndividualChannelStream *ics = &cce->ch[0].ics;
+    const uint16_t *offsets = ics->swb_offset;
+    const float *src = cce->ch[0].coeffs;
+    float *dest = target->coeffs;
+    int g, sfb, w, bin, idx = 0;
+
+    if (ac->m4ac.object_type == AOT_AAC_LTP) {
+        av_log(ac->avctx, AV_LOG_ERROR,
+               "Dependent coupling is not supported together with LTP\n");
+        return;
+    }
+
+    /* Mix the coupling channel's spectrum into the target, band by band,
+     * weighted with the per-band gain decoded in decode_cce(). */
+    for (g = 0; g < ics->num_window_groups; g++) {
+        for (sfb = 0; sfb < ics->max_sfb; sfb++, idx++) {
+            if (cce->ch[0].band_type[idx] != ZERO_BT) {
+                const float gain = cce->coup.gain[index][idx];
+                for (w = 0; w < ics->group_len[g]; w++) {
+                    for (bin = offsets[sfb]; bin < offsets[sfb + 1]; bin++) {
+                        // XXX dsputil-ize
+                        dest[w * 128 + bin] += gain * src[w * 128 + bin];
+                    }
+                }
+            }
+        }
+        dest += ics->group_len[g] * 128;
+        src  += ics->group_len[g] * 128;
+    }
+}
+
+/**
+ * Apply independent channel coupling (applied after IMDCT).
+ *
+ * @param index index into coupling gain array
+ */
+static void apply_independent_coupling(AACContext *ac,
+                                       SingleChannelElement *target,
+                                       ChannelElement *cce, int index)
+{
+    const float gain = cce->coup.gain[index][0];
+    const float *src = cce->ch[0].ret;
+    float *dst = target->ret;
+    /* Twice as many time-domain samples when SBR upsampling is active. */
+    const int len = 1024 << (ac->m4ac.sbr == 1);
+    int n;
+
+    /* Post-IMDCT coupling: a single gain over the whole time buffer. */
+    for (n = 0; n < len; n++)
+        dst[n] += gain * src[n];
+}
+
+/**
+ * channel coupling transformation interface
+ *
+ * @param apply_coupling_method pointer to (in)dependent coupling function
+ */
+static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
+                                   enum RawDataBlockType type, int elem_id,
+                                   enum CouplingPoint coupling_point,
+                                   void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
+{
+    int i, c;
+
+    /* Scan every CCE that couples at this point for targets referring to
+     * the given element. */
+    for (i = 0; i < MAX_ELEM_ID; i++) {
+        ChannelElement *cce = ac->che[TYPE_CCE][i];
+        int index = 0;
+
+        if (cce && cce->coup.coupling_point == coupling_point) {
+            ChannelCoupling *coup = &cce->coup;
+
+            for (c = 0; c <= coup->num_coupled; c++) {
+                if (coup->type[c] == type && coup->id_select[c] == elem_id) {
+                    /* ch_select: 0 = both channels sharing one gain list,
+                     * 1 = second channel only, 2 = first channel only,
+                     * 3 = both channels with separate gain lists. */
+                    if (coup->ch_select[c] != 1) {
+                        apply_coupling_method(ac, &cc->ch[0], cce, index);
+                        if (coup->ch_select[c] != 0)
+                            index++;
+                    }
+                    if (coup->ch_select[c] != 2)
+                        apply_coupling_method(ac, &cc->ch[1], cce, index++);
+                } else
+                    /* Not our target: skip its gain list(s). */
+                    index += 1 + (coup->ch_select[c] == 3);
+            }
+        }
+    }
+}
+
+/**
+ * Convert spectral data to float samples, applying all supported tools as appropriate.
+ */
+static void spectral_to_sample(AACContext *ac)
+{
+    int i, type;
+    /* Walk all element types (in descending type order) and ids. */
+    for (type = 3; type >= 0; type--) {
+        for (i = 0; i < MAX_ELEM_ID; i++) {
+            ChannelElement *che = ac->che[type][i];
+            if (che) {
+                /* Tool order: coupling (pre-TNS), LTP, TNS, coupling
+                 * (pre-IMDCT), IMDCT+windowing, SBR, coupling (post-IMDCT). */
+                if (type <= TYPE_CPE)
+                    apply_channel_coupling(ac, che, type, i, BEFORE_TNS, apply_dependent_coupling);
+                if (ac->m4ac.object_type == AOT_AAC_LTP) {
+                    if (che->ch[0].ics.predictor_present) {
+                        if (che->ch[0].ics.ltp.present)
+                            apply_ltp(ac, &che->ch[0]);
+                        if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
+                            apply_ltp(ac, &che->ch[1]);
+                    }
+                }
+                if (che->ch[0].tns.present)
+                    apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
+                if (che->ch[1].tns.present)
+                    apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
+                if (type <= TYPE_CPE)
+                    apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, apply_dependent_coupling);
+                /* CCEs that couple after the IMDCT need their own
+                 * time-domain signal; others are spectral-only. */
+                if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
+                    imdct_and_windowing(ac, &che->ch[0]);
+                    if (ac->m4ac.object_type == AOT_AAC_LTP)
+                        update_ltp(ac, &che->ch[0]);
+                    if (type == TYPE_CPE) {
+                        imdct_and_windowing(ac, &che->ch[1]);
+                        if (ac->m4ac.object_type == AOT_AAC_LTP)
+                            update_ltp(ac, &che->ch[1]);
+                    }
+                    if (ac->m4ac.sbr > 0) {
+                        /* Apply SBR to the time-domain output. */
+                        ff_sbr_apply(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
+                    }
+                }
+                if (type <= TYPE_CCE)
+                    apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, apply_independent_coupling);
+            }
+        }
+    }
+}
+
+/**
+ * Parse an ADTS frame header and, while the output is not yet locked,
+ * (re)configure the decoder from the header fields.
+ *
+ * @return the value of ff_aac_parse_header() on success (positive size),
+ *         or a negative error code.
+ */
+static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
+{
+    int size;
+    AACADTSHeaderInfo hdr_info;
+
+    size = ff_aac_parse_header(gb, &hdr_info);
+    if (size > 0) {
+        if (ac->output_configured != OC_LOCKED && hdr_info.chan_config) {
+            /* Try the channel layout signaled by the header. */
+            enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
+            memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
+            ac->m4ac.chan_config = hdr_info.chan_config;
+            if (set_default_channel_config(ac->avctx, new_che_pos, hdr_info.chan_config))
+                return -7;
+            if (output_configure(ac, ac->che_pos, new_che_pos, hdr_info.chan_config, OC_TRIAL_FRAME))
+                return -7;
+        } else if (ac->output_configured != OC_LOCKED) {
+            ac->output_configured = OC_NONE;
+        }
+        if (ac->output_configured != OC_LOCKED) {
+            /* SBR/PS presence unknown until found (or not) in the stream. */
+            ac->m4ac.sbr = -1;
+            ac->m4ac.ps = -1;
+        }
+        ac->m4ac.sample_rate = hdr_info.sample_rate;
+        ac->m4ac.sampling_index = hdr_info.sampling_index;
+        ac->m4ac.object_type = hdr_info.object_type;
+        if (!ac->avctx->sample_rate)
+            ac->avctx->sample_rate = hdr_info.sample_rate;
+        if (hdr_info.num_aac_frames == 1) {
+            if (!hdr_info.crc_absent)
+                skip_bits(gb, 16);   // CRC
+        } else {
+            av_log_missing_feature(ac->avctx, "More than one AAC RDB per ADTS frame is", 0);
+            return -1;
+        }
+    }
+    return size;
+}
+
+/**
+ * Decode one raw_data_block: parse syntactic elements until TYPE_END,
+ * run the spectral-to-sample pipeline and interleave the output.
+ *
+ * @param data      output sample buffer
+ * @param data_size in: capacity of data in bytes; out: bytes written
+ * @return 0 on success, negative on error
+ */
+static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
+                                int *data_size, GetBitContext *gb)
+{
+    AACContext *ac = avctx->priv_data;
+    ChannelElement *che = NULL, *che_prev = NULL;
+    enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
+    int err, elem_id, data_size_tmp;
+    int samples = 0, multiplier;
+
+    /* Optional ADTS header (syncword 0xFFF). */
+    if (show_bits(gb, 12) == 0xfff) {
+        if (parse_adts_frame_header(ac, gb) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
+            return -1;
+        }
+        if (ac->m4ac.sampling_index > 12) {
+            av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->m4ac.sampling_index);
+            return -1;
+        }
+    }
+
+    ac->tags_mapped = 0;
+    // parse
+    while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
+        elem_id = get_bits(gb, 4);
+
+        /* SCE/CPE/CCE/LFE carry audio and need a channel element. */
+        if (elem_type < TYPE_DSE) {
+            if (!(che=get_che(ac, elem_type, elem_id))) {
+                av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
+                       elem_type, elem_id);
+                return -1;
+            }
+            samples = 1024;
+        }
+
+        switch (elem_type) {
+
+        case TYPE_SCE:
+            err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+            break;
+
+        case TYPE_CPE:
+            err = decode_cpe(ac, gb, che);
+            break;
+
+        case TYPE_CCE:
+            err = decode_cce(ac, gb, che);
+            break;
+
+        case TYPE_LFE:
+            err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+            break;
+
+        case TYPE_DSE:
+            err = skip_data_stream_element(ac, gb);
+            break;
+
+        case TYPE_PCE: {
+            /* In-band program_config_element: may reconfigure output. */
+            enum ChannelPosition new_che_pos[4][MAX_ELEM_ID];
+            memset(new_che_pos, 0, 4 * MAX_ELEM_ID * sizeof(new_che_pos[0][0]));
+            if ((err = decode_pce(avctx, &ac->m4ac, new_che_pos, gb)))
+                break;
+            if (ac->output_configured > OC_TRIAL_PCE)
+                av_log(avctx, AV_LOG_ERROR,
+                       "Not evaluating a further program_config_element as this construct is dubious at best.\n");
+            else
+                err = output_configure(ac, ac->che_pos, new_che_pos, 0, OC_TRIAL_PCE);
+            break;
+        }
+
+        case TYPE_FIL:
+            /* Fill element: elem_id is the byte count, with an escape for
+             * lengths >= 15. Extensions attach to the previous element. */
+            if (elem_id == 15)
+                elem_id += get_bits(gb, 8) - 1;
+            if (get_bits_left(gb) < 8 * elem_id) {
+                av_log(avctx, AV_LOG_ERROR, overread_err);
+                return -1;
+            }
+            while (elem_id > 0)
+                elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
+            err = 0; /* FIXME */
+            break;
+
+        default:
+            err = -1; /* should not happen, but keeps compiler happy */
+            break;
+        }
+
+        che_prev = che;
+        elem_type_prev = elem_type;
+
+        if (err)
+            return err;
+
+        if (get_bits_left(gb) < 3) {
+            av_log(avctx, AV_LOG_ERROR, overread_err);
+            return -1;
+        }
+    }
+
+    spectral_to_sample(ac);
+
+    /* SBR doubles the output length when it raises the sample rate. */
+    multiplier = (ac->m4ac.sbr == 1) ? ac->m4ac.ext_sample_rate > ac->m4ac.sample_rate : 0;
+    samples <<= multiplier;
+    if (ac->output_configured < OC_LOCKED) {
+        avctx->sample_rate = ac->m4ac.sample_rate << multiplier;
+        avctx->frame_size = samples;
+    }
+
+    /* ffdshow custom code */
+#if CONFIG_AUDIO_FLOAT
+    data_size_tmp = samples * avctx->channels * sizeof(float);
+#else
+    data_size_tmp = samples * avctx->channels * sizeof(int16_t);
+#endif
+    if (*data_size < data_size_tmp) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Output buffer too small (%d) or trying to output too many samples (%d) for this frame.\n",
+               *data_size, data_size_tmp);
+        return -1;
+    }
+    *data_size = data_size_tmp;
+
+    if (samples) {
+        /* ffdshow custom code */
+#if CONFIG_AUDIO_FLOAT
+        float_interleave(data, (const float **)ac->output_data, samples, avctx->channels);
+#else
+        ac->fmt_conv.float_to_int16_interleave(data, (const float **)ac->output_data, samples, avctx->channels);
+#endif
+    }
+
+    /* A successfully decoded frame locks the output configuration. */
+    if (ac->output_configured)
+        ac->output_configured = OC_LOCKED;
+
+    return 0;
+}
+
+static int aac_decode_frame(AVCodecContext *avctx, void *data,
+ int *data_size, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ GetBitContext gb;
+ int buf_consumed;
+ int buf_offset;
+ int err;
+
+ init_get_bits(&gb, buf, buf_size * 8);
+
+ if ((err = aac_decode_frame_int(avctx, data, data_size, &gb)) < 0)
+ return err;
+
+ buf_consumed = (get_bits_count(&gb) + 7) >> 3;
+ for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++)
+ if (buf[buf_offset])
+ break;
+
+ return buf_size > buf_offset ? buf_consumed : buf_size;
+}
+
+static av_cold int aac_decode_close(AVCodecContext *avctx)
+{
+ AACContext *ac = avctx->priv_data;
+ int i, type;
+
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ for (type = 0; type < 4; type++) {
+ if (ac->che[type][i])
+ ff_aac_sbr_ctx_close(&ac->che[type][i]->sbr);
+ av_freep(&ac->che[type][i]);
+ }
+ }
+
+ ff_mdct_end(&ac->mdct);
+ ff_mdct_end(&ac->mdct_small);
+ ff_mdct_end(&ac->mdct_ltp);
+ return 0;
+}
+
+
+#define LOAS_SYNC_WORD 0x2b7 ///< 11 bits LOAS sync word
+
+struct LATMContext {
+ AACContext aac_ctx; ///< containing AACContext
+ int initialized; ///< initialized after a valid extradata was seen
+
+ // parser data
+ int audio_mux_version_A; ///< LATM syntax version
+ int frame_length_type; ///< 0/1 variable/fixed frame length
+ int frame_length; ///< frame length for fixed frame length
+};
+
+static inline uint32_t latm_get_value(GetBitContext *b)
+{
+ int length = get_bits(b, 2);
+
+ return get_bits_long(b, (length+1)*8);
+}
+
+static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
+ GetBitContext *gb)
+{
+ AVCodecContext *avctx = latmctx->aac_ctx.avctx;
+ MPEG4AudioConfig m4ac;
+ int config_start_bit = get_bits_count(gb);
+ int bits_consumed, esize;
+
+ if (config_start_bit % 8) {
+ av_log_missing_feature(latmctx->aac_ctx.avctx, "audio specific "
+ "config not byte aligned.\n", 1);
+ return AVERROR_INVALIDDATA;
+ } else {
+ bits_consumed =
+ decode_audio_specific_config(NULL, avctx, &m4ac,
+ gb->buffer + (config_start_bit / 8),
+ get_bits_left(gb) / 8);
+
+ if (bits_consumed < 0)
+ return AVERROR_INVALIDDATA;
+
+ esize = (bits_consumed+7) / 8;
+
+ if (avctx->extradata_size <= esize) {
+ av_free(avctx->extradata);
+ avctx->extradata = av_malloc(esize + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!avctx->extradata)
+ return AVERROR(ENOMEM);
+ }
+
+ avctx->extradata_size = esize;
+ memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
+ memset(avctx->extradata+esize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ skip_bits_long(gb, bits_consumed);
+ }
+
+ return bits_consumed;
+}
+
+static int read_stream_mux_config(struct LATMContext *latmctx,
+ GetBitContext *gb)
+{
+ int ret, audio_mux_version = get_bits(gb, 1);
+
+ latmctx->audio_mux_version_A = 0;
+ if (audio_mux_version)
+ latmctx->audio_mux_version_A = get_bits(gb, 1);
+
+ if (!latmctx->audio_mux_version_A) {
+
+ if (audio_mux_version)
+ latm_get_value(gb); // taraFullness
+
+ skip_bits(gb, 1); // allStreamSameTimeFraming
+ skip_bits(gb, 6); // numSubFrames
+ // numPrograms
+ if (get_bits(gb, 4)) { // numPrograms
+ av_log_missing_feature(latmctx->aac_ctx.avctx,
+ "multiple programs are not supported\n", 1);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ // for each program (which there is only one in DVB)
+
+ // for each layer (which there is only one in DVB)
+ if (get_bits(gb, 3)) { // numLayer
+ av_log_missing_feature(latmctx->aac_ctx.avctx,
+ "multiple layers are not supported\n", 1);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ // for all but first stream: use_same_config = get_bits(gb, 1);
+ if (!audio_mux_version) {
+ if ((ret = latm_decode_audio_specific_config(latmctx, gb)) < 0)
+ return ret;
+ } else {
+ int ascLen = latm_get_value(gb);
+ if ((ret = latm_decode_audio_specific_config(latmctx, gb)) < 0)
+ return ret;
+ ascLen -= ret;
+ skip_bits_long(gb, ascLen);
+ }
+
+ latmctx->frame_length_type = get_bits(gb, 3);
+ switch (latmctx->frame_length_type) {
+ case 0:
+ skip_bits(gb, 8); // latmBufferFullness
+ break;
+ case 1:
+ latmctx->frame_length = get_bits(gb, 9);
+ break;
+ case 3:
+ case 4:
+ case 5:
+ skip_bits(gb, 6); // CELP frame length table index
+ break;
+ case 6:
+ case 7:
+ skip_bits(gb, 1); // HVXC frame length table index
+ break;
+ }
+
+ if (get_bits(gb, 1)) { // other data
+ if (audio_mux_version) {
+ latm_get_value(gb); // other_data_bits
+ } else {
+ int esc;
+ do {
+ esc = get_bits(gb, 1);
+ skip_bits(gb, 8);
+ } while (esc);
+ }
+ }
+
+ if (get_bits(gb, 1)) // crc present
+ skip_bits(gb, 8); // config_crc
+ }
+
+ return 0;
+}
+
+static int read_payload_length_info(struct LATMContext *ctx, GetBitContext *gb)
+{
+ uint8_t tmp;
+
+ if (ctx->frame_length_type == 0) {
+ int mux_slot_length = 0;
+ do {
+ tmp = get_bits(gb, 8);
+ mux_slot_length += tmp;
+ } while (tmp == 255);
+ return mux_slot_length;
+ } else if (ctx->frame_length_type == 1) {
+ return ctx->frame_length;
+ } else if (ctx->frame_length_type == 3 ||
+ ctx->frame_length_type == 5 ||
+ ctx->frame_length_type == 7) {
+ skip_bits(gb, 2); // mux_slot_length_coded
+ }
+ return 0;
+}
+
+static int read_audio_mux_element(struct LATMContext *latmctx,
+ GetBitContext *gb)
+{
+ int err;
+ uint8_t use_same_mux = get_bits(gb, 1);
+ if (!use_same_mux) {
+ if ((err = read_stream_mux_config(latmctx, gb)) < 0)
+ return err;
+ } else if (!latmctx->aac_ctx.avctx->extradata) {
+ av_log(latmctx->aac_ctx.avctx, AV_LOG_DEBUG,
+ "no decoder config found\n");
+ return AVERROR(EAGAIN);
+ }
+ if (latmctx->audio_mux_version_A == 0) {
+ int mux_slot_length_bytes = read_payload_length_info(latmctx, gb);
+ if (mux_slot_length_bytes * 8 > get_bits_left(gb)) {
+ av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR, "incomplete frame\n");
+ return AVERROR_INVALIDDATA;
+ } else if (mux_slot_length_bytes * 8 + 256 < get_bits_left(gb)) {
+ av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
+ "frame length mismatch %d << %d\n",
+ mux_slot_length_bytes * 8, get_bits_left(gb));
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ return 0;
+}
+
+
+static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
+ AVPacket *avpkt)
+{
+ struct LATMContext *latmctx = avctx->priv_data;
+ int muxlength, err;
+ GetBitContext gb;
+
+ if (avpkt->size == 0)
+ return 0;
+
+ init_get_bits(&gb, avpkt->data, avpkt->size * 8);
+
+ // check for LOAS sync word
+ if (get_bits(&gb, 11) != LOAS_SYNC_WORD)
+ return AVERROR_INVALIDDATA;
+
+ muxlength = get_bits(&gb, 13) + 3;
+ // not enough data, the parser should have sorted this
+ if (muxlength > avpkt->size)
+ return AVERROR_INVALIDDATA;
+
+ if ((err = read_audio_mux_element(latmctx, &gb)) < 0)
+ return err;
+
+ if (!latmctx->initialized) {
+ if (!avctx->extradata) {
+ *out_size = 0;
+ return avpkt->size;
+ } else {
+ if ((err = aac_decode_init(avctx)) < 0)
+ return err;
+ latmctx->initialized = 1;
+ }
+ }
+
+ if (show_bits(&gb, 12) == 0xfff) {
+ av_log(latmctx->aac_ctx.avctx, AV_LOG_ERROR,
+ "ADTS header detected, probably as result of configuration "
+ "misparsing\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if ((err = aac_decode_frame_int(avctx, out, out_size, &gb)) < 0)
+ return err;
+
+ return muxlength;
+}
+
+av_cold static int latm_decode_init(AVCodecContext *avctx)
+{
+ struct LATMContext *latmctx = avctx->priv_data;
+ int ret;
+
+ ret = aac_decode_init(avctx);
+
+ if (avctx->extradata_size > 0) {
+ latmctx->initialized = !ret;
+ } else {
+ latmctx->initialized = 0;
+ }
+
+ return ret;
+}
+
+
+AVCodec ff_aac_decoder = {
+ "aac",
+ AVMEDIA_TYPE_AUDIO,
+ CODEC_ID_AAC,
+ sizeof(AACContext),
+ aac_decode_init,
+ NULL,
+ aac_decode_close,
+ aac_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
+ .sample_fmts = (const enum AVSampleFormat[]) {
+#if CONFIG_AUDIO_FLOAT
+ AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE
+#else
+ AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE
+#endif
+ },
+ .channel_layouts = aac_channel_layout,
+};
+
+/*
+ Note: This decoder filter is intended to decode LATM streams transferred
+ in MPEG transport streams which only contain one program.
+ To do a more complex LATM demuxing a separate LATM demuxer should be used.
+*/
+AVCodec ff_aac_latm_decoder = {
+ .name = "aac_latm",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_AAC_LATM,
+ .priv_data_size = sizeof(struct LATMContext),
+ .init = latm_decode_init,
+ .close = aac_decode_close,
+ .decode = latm_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
+ .sample_fmts = (const enum AVSampleFormat[]) {
+#if CONFIG_AUDIO_FLOAT
+ AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE
+#else
+ AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE
+#endif
+ },
+ .channel_layouts = aac_channel_layout,
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacdectab.h b/samples/rtsp_player/ffmpeg/libavcodec/aacdectab.h
new file mode 100755
index 0000000..0bccb84
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacdectab.h
@@ -0,0 +1,104 @@
+/*
+ * AAC decoder data
+ * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
+ * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC decoder data
+ * @author Oded Shimon ( ods15 ods15 dyndns org )
+ * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
+ */
+
+#ifndef AVCODEC_AACDECTAB_H
+#define AVCODEC_AACDECTAB_H
+
+#include "libavutil/audioconvert.h"
+#include "aac.h"
+
+#include <stdint.h>
+
+/* @name ltp_coef
+ * Table of the LTP coefficient (multiplied by 2)
+ */
+static const float ltp_coef[8] = {
+ 1.141658, 1.393232, 1.626008, 1.822608,
+ 1.969800, 2.135788, 2.2389202, 2.739066,
+};
+
+/* @name tns_tmp2_map
+ * Tables of the tmp2[] arrays of LPC coefficients used for TNS.
+ * The suffix _M_N[] indicate the values of coef_compress and coef_res
+ * respectively.
+ * @{
+ */
+static const float tns_tmp2_map_1_3[4] = {
+ 0.00000000, -0.43388373, 0.64278758, 0.34202015,
+};
+
+static const float tns_tmp2_map_0_3[8] = {
+ 0.00000000, -0.43388373, -0.78183150, -0.97492790,
+ 0.98480773, 0.86602539, 0.64278758, 0.34202015,
+};
+
+static const float tns_tmp2_map_1_4[8] = {
+ 0.00000000, -0.20791170, -0.40673664, -0.58778524,
+ 0.67369562, 0.52643216, 0.36124167, 0.18374951,
+};
+
+static const float tns_tmp2_map_0_4[16] = {
+ 0.00000000, -0.20791170, -0.40673664, -0.58778524,
+ -0.74314481, -0.86602539, -0.95105654, -0.99452192,
+ 0.99573416, 0.96182561, 0.89516330, 0.79801720,
+ 0.67369562, 0.52643216, 0.36124167, 0.18374951,
+};
+
+static const float * const tns_tmp2_map[4] = {
+ tns_tmp2_map_0_3,
+ tns_tmp2_map_0_4,
+ tns_tmp2_map_1_3,
+ tns_tmp2_map_1_4
+};
+// @}
+
+static const int8_t tags_per_config[16] = { 0, 1, 1, 2, 3, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+static const uint8_t aac_channel_layout_map[7][5][2] = {
+ { { TYPE_SCE, 0 }, },
+ { { TYPE_CPE, 0 }, },
+ { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, },
+ { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_SCE, 1 }, },
+ { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_CPE, 1 }, },
+ { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 1 }, },
+ { { TYPE_CPE, 0 }, { TYPE_SCE, 0 }, { TYPE_LFE, 0 }, { TYPE_CPE, 2 }, { TYPE_CPE, 1 }, },
+};
+
+static const int64_t aac_channel_layout[8] = {
+ AV_CH_LAYOUT_MONO,
+ AV_CH_LAYOUT_STEREO,
+ AV_CH_LAYOUT_SURROUND,
+ AV_CH_LAYOUT_4POINT0,
+ AV_CH_LAYOUT_5POINT0_BACK,
+ AV_CH_LAYOUT_5POINT1_BACK,
+ AV_CH_LAYOUT_7POINT1_WIDE,
+ 0,
+};
+
+#endif /* AVCODEC_AACDECTAB_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacenc.c b/samples/rtsp_player/ffmpeg/libavcodec/aacenc.c
new file mode 100755
index 0000000..8843cbd
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacenc.c
@@ -0,0 +1,660 @@
+/*
+ * AAC encoder
+ * Copyright (C) 2008 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC encoder
+ */
+
+/***********************************
+ * TODOs:
+ * add sane pulse detection
+ * add temporal noise shaping
+ ***********************************/
+
+#include "avcodec.h"
+#include "put_bits.h"
+#include "dsputil.h"
+#include "mpeg4audio.h"
+#include "kbdwin.h"
+#include "sinewin.h"
+
+#include "aac.h"
+#include "aactab.h"
+#include "aacenc.h"
+
+#include "psymodel.h"
+
+#define AAC_MAX_CHANNELS 6
+
+static const uint8_t swb_size_1024_96[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8,
+ 12, 12, 12, 12, 12, 16, 16, 24, 28, 36, 44,
+ 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64
+};
+
+static const uint8_t swb_size_1024_64[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8,
+ 12, 12, 12, 16, 16, 16, 20, 24, 24, 28, 36,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40
+};
+
+static const uint8_t swb_size_1024_48[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8,
+ 12, 12, 12, 12, 16, 16, 20, 20, 24, 24, 28, 28,
+ 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
+ 96
+};
+
+static const uint8_t swb_size_1024_32[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8,
+ 12, 12, 12, 12, 16, 16, 20, 20, 24, 24, 28, 28,
+ 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32
+};
+
+static const uint8_t swb_size_1024_24[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 12, 12, 12, 12, 16, 16, 16, 20, 20, 24, 24, 28, 28,
+ 32, 36, 36, 40, 44, 48, 52, 52, 64, 64, 64, 64, 64
+};
+
+static const uint8_t swb_size_1024_16[] = {
+ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 16, 16, 16, 16, 20, 20, 20, 24, 24, 28, 28,
+ 32, 36, 40, 40, 44, 48, 52, 56, 60, 64, 64, 64
+};
+
+static const uint8_t swb_size_1024_8[] = {
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20, 24, 24, 24, 28, 28,
+ 32, 36, 36, 40, 44, 48, 52, 56, 60, 64, 80
+};
+
+static const uint8_t *swb_size_1024[] = {
+ swb_size_1024_96, swb_size_1024_96, swb_size_1024_64,
+ swb_size_1024_48, swb_size_1024_48, swb_size_1024_32,
+ swb_size_1024_24, swb_size_1024_24, swb_size_1024_16,
+ swb_size_1024_16, swb_size_1024_16, swb_size_1024_8
+};
+
+static const uint8_t swb_size_128_96[] = {
+ 4, 4, 4, 4, 4, 4, 8, 8, 8, 16, 28, 36
+};
+
+static const uint8_t swb_size_128_48[] = {
+ 4, 4, 4, 4, 4, 8, 8, 8, 12, 12, 12, 16, 16, 16
+};
+
+static const uint8_t swb_size_128_24[] = {
+ 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 12, 12, 16, 16, 20
+};
+
+static const uint8_t swb_size_128_16[] = {
+ 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 12, 12, 16, 20, 20
+};
+
+static const uint8_t swb_size_128_8[] = {
+ 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 12, 16, 20, 20
+};
+
+static const uint8_t *swb_size_128[] = {
+ /* the last entry on the following row is swb_size_128_64 but is a
+ duplicate of swb_size_128_96 */
+ swb_size_128_96, swb_size_128_96, swb_size_128_96,
+ swb_size_128_48, swb_size_128_48, swb_size_128_48,
+ swb_size_128_24, swb_size_128_24, swb_size_128_16,
+ swb_size_128_16, swb_size_128_16, swb_size_128_8
+};
+
+/** default channel configurations */
+static const uint8_t aac_chan_configs[6][5] = {
+ {1, TYPE_SCE}, // 1 channel - single channel element
+ {1, TYPE_CPE}, // 2 channels - channel pair
+ {2, TYPE_SCE, TYPE_CPE}, // 3 channels - center + stereo
+ {3, TYPE_SCE, TYPE_CPE, TYPE_SCE}, // 4 channels - front center + stereo + back center
+ {3, TYPE_SCE, TYPE_CPE, TYPE_CPE}, // 5 channels - front center + stereo + back stereo
+ {4, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_LFE}, // 6 channels - front center + stereo + back stereo + LFE
+};
+
+/**
+ * Make AAC audio config object.
+ * @see 1.6.2.1 "Syntax - AudioSpecificConfig"
+ */
+static void put_audio_specific_config(AVCodecContext *avctx)
+{
+ PutBitContext pb;
+ AACEncContext *s = avctx->priv_data;
+
+ init_put_bits(&pb, avctx->extradata, avctx->extradata_size*8);
+ put_bits(&pb, 5, 2); //object type - AAC-LC
+ put_bits(&pb, 4, s->samplerate_index); //sample rate index
+ put_bits(&pb, 4, avctx->channels);
+ //GASpecificConfig
+ put_bits(&pb, 1, 0); //frame length - 1024 samples
+ put_bits(&pb, 1, 0); //does not depend on core coder
+ put_bits(&pb, 1, 0); //is not extension
+
+ //Explicitly Mark SBR absent
+ put_bits(&pb, 11, 0x2b7); //sync extension
+ put_bits(&pb, 5, AOT_SBR);
+ put_bits(&pb, 1, 0);
+ flush_put_bits(&pb);
+}
+
+static av_cold int aac_encode_init(AVCodecContext *avctx)
+{
+ AACEncContext *s = avctx->priv_data;
+ int i;
+ const uint8_t *sizes[2];
+ int lengths[2];
+
+ avctx->frame_size = 1024;
+
+ for (i = 0; i < 16; i++)
+ if (avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
+ break;
+ if (i == 16) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
+ return -1;
+ }
+ if (avctx->channels > AAC_MAX_CHANNELS) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
+ return -1;
+ }
+ if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile);
+ return -1;
+ }
+ if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) {
+ av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n");
+ return -1;
+ }
+ s->samplerate_index = i;
+
+ dsputil_init(&s->dsp, avctx);
+ ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
+ ff_mdct_init(&s->mdct128, 8, 0, 1.0);
+ // window init
+ ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
+ ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
+ ff_init_ff_sine_windows(10);
+ ff_init_ff_sine_windows(7);
+
+ s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
+ s->cpe = av_mallocz(sizeof(ChannelElement) * aac_chan_configs[avctx->channels-1][0]);
+ avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
+ avctx->extradata_size = 5;
+ put_audio_specific_config(avctx);
+
+ sizes[0] = swb_size_1024[i];
+ sizes[1] = swb_size_128[i];
+ lengths[0] = ff_aac_num_swb_1024[i];
+ lengths[1] = ff_aac_num_swb_128[i];
+ ff_psy_init(&s->psy, avctx, 2, sizes, lengths);
+ s->psypp = ff_psy_preprocess_init(avctx);
+ s->coder = &ff_aac_coders[2];
+
+ s->lambda = avctx->global_quality ? avctx->global_quality : 120;
+
+ ff_aac_tableinit();
+
+ return 0;
+}
+
+static void apply_window_and_mdct(AVCodecContext *avctx, AACEncContext *s,
+ SingleChannelElement *sce, short *audio)
+{
+ int i, k;
+ const int chans = avctx->channels;
+ const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
+ const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
+ const float * pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
+ float *output = sce->ret;
+
+ if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+ memcpy(output, sce->saved, sizeof(float)*1024);
+ if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) {
+ memset(output, 0, sizeof(output[0]) * 448);
+ for (i = 448; i < 576; i++)
+ output[i] = sce->saved[i] * pwindow[i - 448];
+ for (i = 576; i < 704; i++)
+ output[i] = sce->saved[i];
+ }
+ if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) {
+ for (i = 0; i < 1024; i++) {
+ output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1];
+ sce->saved[i] = audio[i * chans] * lwindow[i];
+ }
+ } else {
+ for (i = 0; i < 448; i++)
+ output[i+1024] = audio[i * chans];
+ for (; i < 576; i++)
+ output[i+1024] = audio[i * chans] * swindow[576 - i - 1];
+ memset(output+1024+576, 0, sizeof(output[0]) * 448);
+ for (i = 0; i < 1024; i++)
+ sce->saved[i] = audio[i * chans];
+ }
+ s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
+ } else {
+ for (k = 0; k < 1024; k += 128) {
+ for (i = 448 + k; i < 448 + k + 256; i++)
+ output[i - 448 - k] = (i < 1024)
+ ? sce->saved[i]
+ : audio[(i-1024)*chans];
+ s->dsp.vector_fmul (output, output, k ? swindow : pwindow, 128);
+ s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
+ s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
+ }
+ for (i = 0; i < 1024; i++)
+ sce->saved[i] = audio[i * chans];
+ }
+}
+
+/**
+ * Encode ics_info element.
+ * @see Table 4.6 (syntax of ics_info)
+ */
+static void put_ics_info(AACEncContext *s, IndividualChannelStream *info)
+{
+ int w;
+
+ put_bits(&s->pb, 1, 0); // ics_reserved bit
+ put_bits(&s->pb, 2, info->window_sequence[0]);
+ put_bits(&s->pb, 1, info->use_kb_window[0]);
+ if (info->window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+ put_bits(&s->pb, 6, info->max_sfb);
+ put_bits(&s->pb, 1, 0); // no prediction
+ } else {
+ put_bits(&s->pb, 4, info->max_sfb);
+ for (w = 1; w < 8; w++)
+ put_bits(&s->pb, 1, !info->group_len[w]);
+ }
+}
+
+/**
+ * Encode MS data.
+ * @see 4.6.8.1 "Joint Coding - M/S Stereo"
+ */
+static void encode_ms_info(PutBitContext *pb, ChannelElement *cpe)
+{
+ int i, w;
+
+ put_bits(pb, 2, cpe->ms_mode);
+ if (cpe->ms_mode == 1)
+ for (w = 0; w < cpe->ch[0].ics.num_windows; w += cpe->ch[0].ics.group_len[w])
+ for (i = 0; i < cpe->ch[0].ics.max_sfb; i++)
+ put_bits(pb, 1, cpe->ms_mask[w*16 + i]);
+}
+
+/**
+ * Produce integer coefficients from scalefactors provided by the model.
+ */
+static void adjust_frame_information(AACEncContext *apc, ChannelElement *cpe, int chans)
+{
+ int i, w, w2, g, ch;
+ int start, maxsfb, cmaxsfb;
+
+ for (ch = 0; ch < chans; ch++) {
+ IndividualChannelStream *ics = &cpe->ch[ch].ics;
+ start = 0;
+ maxsfb = 0;
+ cpe->ch[ch].pulse.num_pulse = 0;
+ for (w = 0; w < ics->num_windows*16; w += 16) {
+ for (g = 0; g < ics->num_swb; g++) {
+ //apply M/S
+ if (cpe->common_window && !ch && cpe->ms_mask[w + g]) {
+ for (i = 0; i < ics->swb_sizes[g]; i++) {
+ cpe->ch[0].coeffs[start+i] = (cpe->ch[0].coeffs[start+i] + cpe->ch[1].coeffs[start+i]) / 2.0;
+ cpe->ch[1].coeffs[start+i] = cpe->ch[0].coeffs[start+i] - cpe->ch[1].coeffs[start+i];
+ }
+ }
+ start += ics->swb_sizes[g];
+ }
+ for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w+cmaxsfb-1]; cmaxsfb--)
+ ;
+ maxsfb = FFMAX(maxsfb, cmaxsfb);
+ }
+ ics->max_sfb = maxsfb;
+
+ //adjust zero bands for window groups
+ for (w = 0; w < ics->num_windows; w += ics->group_len[w]) {
+ for (g = 0; g < ics->max_sfb; g++) {
+ i = 1;
+ for (w2 = w; w2 < w + ics->group_len[w]; w2++) {
+ if (!cpe->ch[ch].zeroes[w2*16 + g]) {
+ i = 0;
+ break;
+ }
+ }
+ cpe->ch[ch].zeroes[w*16 + g] = i;
+ }
+ }
+ }
+
+ if (chans > 1 && cpe->common_window) {
+ IndividualChannelStream *ics0 = &cpe->ch[0].ics;
+ IndividualChannelStream *ics1 = &cpe->ch[1].ics;
+ int msc = 0;
+ ics0->max_sfb = FFMAX(ics0->max_sfb, ics1->max_sfb);
+ ics1->max_sfb = ics0->max_sfb;
+ for (w = 0; w < ics0->num_windows*16; w += 16)
+ for (i = 0; i < ics0->max_sfb; i++)
+ if (cpe->ms_mask[w+i])
+ msc++;
+ if (msc == 0 || ics0->max_sfb == 0)
+ cpe->ms_mode = 0;
+ else
+ cpe->ms_mode = msc < ics0->max_sfb ? 1 : 2;
+ }
+}
+
+/**
+ * Encode scalefactor band coding type.
+ */
+static void encode_band_info(AACEncContext *s, SingleChannelElement *sce)
+{
+ int w;
+
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w])
+ s->coder->encode_window_bands_info(s, sce, w, sce->ics.group_len[w], s->lambda);
+}
+
+/**
+ * Encode scalefactors.
+ */
+static void encode_scale_factors(AVCodecContext *avctx, AACEncContext *s,
+ SingleChannelElement *sce)
+{
+ int off = sce->sf_idx[0], diff;
+ int i, w;
+
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ for (i = 0; i < sce->ics.max_sfb; i++) {
+ if (!sce->zeroes[w*16 + i]) {
+ diff = sce->sf_idx[w*16 + i] - off + SCALE_DIFF_ZERO;
+ if (diff < 0 || diff > 120)
+ av_log(avctx, AV_LOG_ERROR, "Scalefactor difference is too big to be coded\n");
+ off = sce->sf_idx[w*16 + i];
+ put_bits(&s->pb, ff_aac_scalefactor_bits[diff], ff_aac_scalefactor_code[diff]);
+ }
+ }
+ }
+}
+
+/**
+ * Encode pulse data.
+ */
+static void encode_pulses(AACEncContext *s, Pulse *pulse)
+{
+ int i;
+
+ put_bits(&s->pb, 1, !!pulse->num_pulse);
+ if (!pulse->num_pulse)
+ return;
+
+ put_bits(&s->pb, 2, pulse->num_pulse - 1);
+ put_bits(&s->pb, 6, pulse->start);
+ for (i = 0; i < pulse->num_pulse; i++) {
+ put_bits(&s->pb, 5, pulse->pos[i]);
+ put_bits(&s->pb, 4, pulse->amp[i]);
+ }
+}
+
+/**
+ * Encode spectral coefficients processed by psychoacoustic model.
+ */
+static void encode_spectral_coeffs(AACEncContext *s, SingleChannelElement *sce)
+{
+ int start, i, w, w2;
+
+ for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
+ start = 0;
+ for (i = 0; i < sce->ics.max_sfb; i++) {
+ if (sce->zeroes[w*16 + i]) {
+ start += sce->ics.swb_sizes[i];
+ continue;
+ }
+ for (w2 = w; w2 < w + sce->ics.group_len[w]; w2++)
+ s->coder->quantize_and_encode_band(s, &s->pb, sce->coeffs + start + w2*128,
+ sce->ics.swb_sizes[i],
+ sce->sf_idx[w*16 + i],
+ sce->band_type[w*16 + i],
+ s->lambda);
+ start += sce->ics.swb_sizes[i];
+ }
+ }
+}
+
+/**
+ * Encode one channel of audio data.
+ */
+static int encode_individual_channel(AVCodecContext *avctx, AACEncContext *s,
+ SingleChannelElement *sce,
+ int common_window)
+{
+ put_bits(&s->pb, 8, sce->sf_idx[0]);
+ if (!common_window)
+ put_ics_info(s, &sce->ics);
+ encode_band_info(s, sce);
+ encode_scale_factors(avctx, s, sce);
+ encode_pulses(s, &sce->pulse);
+ put_bits(&s->pb, 1, 0); //tns
+ put_bits(&s->pb, 1, 0); //ssr
+ encode_spectral_coeffs(s, sce);
+ return 0;
+}
+
+/**
+ * Write some auxiliary information about the created AAC file.
+ */
+static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
+ const char *name)
+{
+ int i, namelen, padbits;
+
+ namelen = strlen(name) + 2;
+ put_bits(&s->pb, 3, TYPE_FIL);
+ put_bits(&s->pb, 4, FFMIN(namelen, 15));
+ if (namelen >= 15)
+ put_bits(&s->pb, 8, namelen - 16);
+ put_bits(&s->pb, 4, 0); //extension type - filler
+ padbits = 8 - (put_bits_count(&s->pb) & 7);
+ align_put_bits(&s->pb);
+ for (i = 0; i < namelen - 2; i++)
+ put_bits(&s->pb, 8, name[i]);
+ put_bits(&s->pb, 12 - padbits, 0);
+}
+
+static int aac_encode_frame(AVCodecContext *avctx,
+ uint8_t *frame, int buf_size, void *data)
+{
+ AACEncContext *s = avctx->priv_data;
+ int16_t *samples = s->samples, *samples2, *la;
+ ChannelElement *cpe;
+ int i, j, chans, tag, start_ch;
+ const uint8_t *chan_map = aac_chan_configs[avctx->channels-1];
+ int chan_el_counter[4];
+ FFPsyWindowInfo windows[AAC_MAX_CHANNELS];
+
+ if (s->last_frame)
+ return 0;
+ if (data) {
+ if (!s->psypp) {
+ memcpy(s->samples + 1024 * avctx->channels, data,
+ 1024 * avctx->channels * sizeof(s->samples[0]));
+ } else {
+ start_ch = 0;
+ samples2 = s->samples + 1024 * avctx->channels;
+ for (i = 0; i < chan_map[0]; i++) {
+ tag = chan_map[i+1];
+ chans = tag == TYPE_CPE ? 2 : 1;
+ ff_psy_preprocess(s->psypp, (uint16_t*)data + start_ch,
+ samples2 + start_ch, start_ch, chans);
+ start_ch += chans;
+ }
+ }
+ }
+ if (!avctx->frame_number) {
+ memcpy(s->samples, s->samples + 1024 * avctx->channels,
+ 1024 * avctx->channels * sizeof(s->samples[0]));
+ return 0;
+ }
+
+ start_ch = 0;
+ for (i = 0; i < chan_map[0]; i++) {
+ FFPsyWindowInfo* wi = windows + start_ch;
+ tag = chan_map[i+1];
+ chans = tag == TYPE_CPE ? 2 : 1;
+ cpe = &s->cpe[i];
+ for (j = 0; j < chans; j++) {
+ IndividualChannelStream *ics = &cpe->ch[j].ics;
+ int k;
+ int cur_channel = start_ch + j;
+ samples2 = samples + cur_channel;
+ la = samples2 + (448+64) * avctx->channels;
+ if (!data)
+ la = NULL;
+ if (tag == TYPE_LFE) {
+ wi[j].window_type[0] = ONLY_LONG_SEQUENCE;
+ wi[j].window_shape = 0;
+ wi[j].num_windows = 1;
+ wi[j].grouping[0] = 1;
+ } else {
+ wi[j] = ff_psy_suggest_window(&s->psy, samples2, la, cur_channel,
+ ics->window_sequence[0]);
+ }
+ ics->window_sequence[1] = ics->window_sequence[0];
+ ics->window_sequence[0] = wi[j].window_type[0];
+ ics->use_kb_window[1] = ics->use_kb_window[0];
+ ics->use_kb_window[0] = wi[j].window_shape;
+ ics->num_windows = wi[j].num_windows;
+ ics->swb_sizes = s->psy.bands [ics->num_windows == 8];
+ ics->num_swb = tag == TYPE_LFE ? 12 : s->psy.num_bands[ics->num_windows == 8];
+ for (k = 0; k < ics->num_windows; k++)
+ ics->group_len[k] = wi[j].grouping[k];
+
+ apply_window_and_mdct(avctx, s, &cpe->ch[j], samples2);
+ }
+ start_ch += chans;
+ }
+ do {
+ int frame_bits;
+ init_put_bits(&s->pb, frame, buf_size*8);
+ if ((avctx->frame_number & 0xFF)==1 && !(avctx->flags & CODEC_FLAG_BITEXACT))
+ put_bitstream_info(avctx, s, LIBAVCODEC_IDENT);
+ start_ch = 0;
+ memset(chan_el_counter, 0, sizeof(chan_el_counter));
+ for (i = 0; i < chan_map[0]; i++) {
+ FFPsyWindowInfo* wi = windows + start_ch;
+ tag = chan_map[i+1];
+ chans = tag == TYPE_CPE ? 2 : 1;
+ cpe = &s->cpe[i];
+ put_bits(&s->pb, 3, tag);
+ put_bits(&s->pb, 4, chan_el_counter[tag]++);
+ for (j = 0; j < chans; j++) {
+ s->cur_channel = start_ch + j;
+ ff_psy_set_band_info(&s->psy, s->cur_channel, cpe->ch[j].coeffs, &wi[j]);
+ s->coder->search_for_quantizers(avctx, s, &cpe->ch[j], s->lambda);
+ }
+ cpe->common_window = 0;
+ if (chans > 1
+ && wi[0].window_type[0] == wi[1].window_type[0]
+ && wi[0].window_shape == wi[1].window_shape) {
+
+ cpe->common_window = 1;
+ for (j = 0; j < wi[0].num_windows; j++) {
+ if (wi[0].grouping[j] != wi[1].grouping[j]) {
+ cpe->common_window = 0;
+ break;
+ }
+ }
+ }
+ s->cur_channel = start_ch;
+ if (cpe->common_window && s->coder->search_for_ms)
+ s->coder->search_for_ms(s, cpe, s->lambda);
+ adjust_frame_information(s, cpe, chans);
+ if (chans == 2) {
+ put_bits(&s->pb, 1, cpe->common_window);
+ if (cpe->common_window) {
+ put_ics_info(s, &cpe->ch[0].ics);
+ encode_ms_info(&s->pb, cpe);
+ }
+ }
+ for (j = 0; j < chans; j++) {
+ s->cur_channel = start_ch + j;
+ encode_individual_channel(avctx, s, &cpe->ch[j], cpe->common_window);
+ }
+ start_ch += chans;
+ }
+
+ frame_bits = put_bits_count(&s->pb);
+ if (frame_bits <= 6144 * avctx->channels - 3) {
+ s->psy.bitres.bits = frame_bits / avctx->channels;
+ break;
+ }
+
+ s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits;
+
+ } while (1);
+
+ put_bits(&s->pb, 3, TYPE_END);
+ flush_put_bits(&s->pb);
+ avctx->frame_bits = put_bits_count(&s->pb);
+
+ // rate control stuff
+ if (!(avctx->flags & CODEC_FLAG_QSCALE)) {
+ float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits;
+ s->lambda *= ratio;
+ s->lambda = FFMIN(s->lambda, 65536.f);
+ }
+
+ if (!data)
+ s->last_frame = 1;
+ memcpy(s->samples, s->samples + 1024 * avctx->channels,
+ 1024 * avctx->channels * sizeof(s->samples[0]));
+ return put_bits_count(&s->pb)>>3;
+}
+
+static av_cold int aac_encode_end(AVCodecContext *avctx)
+{
+ AACEncContext *s = avctx->priv_data;
+
+ ff_mdct_end(&s->mdct1024);
+ ff_mdct_end(&s->mdct128);
+ ff_psy_end(&s->psy);
+ ff_psy_preprocess_end(s->psypp);
+ av_freep(&s->samples);
+ av_freep(&s->cpe);
+ return 0;
+}
+
+AVCodec ff_aac_encoder = {
+ "aac",
+ AVMEDIA_TYPE_AUDIO,
+ CODEC_ID_AAC,
+ sizeof(AACEncContext),
+ aac_encode_init,
+ aac_encode_frame,
+ aac_encode_end,
+ .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
+ .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
+ .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacenc.h b/samples/rtsp_player/ffmpeg/libavcodec/aacenc.h
new file mode 100755
index 0000000..1c84679
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacenc.h
@@ -0,0 +1,70 @@
+/*
+ * AAC encoder
+ * Copyright (C) 2008 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AACENC_H
+#define AVCODEC_AACENC_H
+
+#include "avcodec.h"
+#include "put_bits.h"
+#include "dsputil.h"
+
+#include "aac.h"
+
+#include "psymodel.h"
+
+struct AACEncContext;
+
+/**
+ * Pluggable set of coefficient-coding strategies used by the encoder.
+ * Each member selects/encodes one aspect of a channel's spectral data;
+ * all take the current lambda (rate-distortion trade-off factor).
+ */
+typedef struct AACCoefficientsEncoder {
+ void (*search_for_quantizers)(AVCodecContext *avctx, struct AACEncContext *s,
+ SingleChannelElement *sce, const float lambda);
+ void (*encode_window_bands_info)(struct AACEncContext *s, SingleChannelElement *sce,
+ int win, int group_len, const float lambda);
+ void (*quantize_and_encode_band)(struct AACEncContext *s, PutBitContext *pb, const float *in, int size,
+ int scale_idx, int cb, const float lambda);
+ void (*search_for_ms)(struct AACEncContext *s, ChannelElement *cpe, const float lambda);
+} AACCoefficientsEncoder;
+
+// Table of available coder strategy sets, defined elsewhere.
+extern AACCoefficientsEncoder ff_aac_coders[];
+
+/**
+ * AAC encoder context
+ */
+typedef struct AACEncContext {
+ PutBitContext pb; ///< bitstream writer for the output frame
+ FFTContext mdct1024; ///< long (1024 samples) frame transform context
+ FFTContext mdct128; ///< short (128 samples) frame transform context
+ DSPContext dsp;
+ int16_t *samples; ///< saved preprocessed input
+
+ int samplerate_index; ///< MPEG-4 samplerate index
+
+ ChannelElement *cpe; ///< channel elements
+ FFPsyContext psy; ///< psychoacoustic model context
+ struct FFPsyPreprocessContext* psypp; ///< psychoacoustic preprocessing context
+ AACCoefficientsEncoder *coder; ///< selected coefficient-coding strategy set
+ int cur_channel; ///< channel currently being encoded
+ int last_frame; ///< set when the final (flush) frame has been reached
+ float lambda; ///< rate-distortion trade-off factor, adapted per frame
+ DECLARE_ALIGNED(16, int, qcoefs)[96]; ///< quantized coefficients
+ DECLARE_ALIGNED(16, float, scoefs)[1024]; ///< scaled coefficients
+} AACEncContext;
+
+#endif /* AVCODEC_AACENC_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacps.c b/samples/rtsp_player/ffmpeg/libavcodec/aacps.c
new file mode 100755
index 0000000..fc124d1
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacps.c
@@ -0,0 +1,1037 @@
+/*
+ * MPEG-4 Parametric Stereo decoding functions
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include "libavutil/common.h"
+#include "libavutil/mathematics.h"
+#include "avcodec.h"
+#include "get_bits.h"
+#include "aacps.h"
+#include "aacps_tablegen.h"
+#include "aacpsdata.c"
+
+#define PS_BASELINE 0 //< Operate in Baseline PS mode
+ //< Baseline implies 10 or 20 stereo bands,
+ //< mixing mode A, and no ipd/opd
+
+#define numQMFSlots 32 //numTimeSlots * RATE
+
+// Number of envelopes per frame, indexed by [frame_class][2-bit field].
+// frame_class 0 allows a zero-envelope frame; frame_class 1 does not.
+static const int8_t num_env_tab[2][4] = {
+ { 0, 1, 2, 4, },
+ { 1, 2, 3, 4, },
+};
+
+// Number of IID/ICC parameters per envelope, indexed by iid_mode/icc_mode.
+// Modes 3-5 mirror 0-2 but select fine quantization for IID.
+static const int8_t nr_iidicc_par_tab[] = {
+ 10, 20, 34, 10, 20, 34,
+};
+
+// Number of IPD/OPD parameters per envelope, indexed by iid_mode.
+static const int8_t nr_iidopd_par_tab[] = {
+ 5, 11, 17, 5, 11, 17,
+};
+
+// Indices into vlc_ps[] for each Huffman-coded parameter type
+// (df = frequency delta-coded, dt = time delta-coded).
+enum {
+ huff_iid_df1,
+ huff_iid_dt1,
+ huff_iid_df0,
+ huff_iid_dt0,
+ huff_icc_df,
+ huff_icc_dt,
+ huff_ipd_df,
+ huff_ipd_dt,
+ huff_opd_df,
+ huff_opd_dt,
+};
+
+// IID table selector, indexed by 2*dt + iid_quant (see ff_ps_read_data).
+static const int huff_iid[] = {
+ huff_iid_df0,
+ huff_iid_df1,
+ huff_iid_dt0,
+ huff_iid_dt1,
+};
+
+// Static VLC tables for all ten parameter codebooks, built in ff_ps_init().
+static VLC vlc_ps[10];
+
+/**
+ * Read Inter-channel Intensity Difference/Inter-Channel Coherence/
+ * Inter-channel Phase Difference/Overall Phase Difference parameters from the
+ * bitstream.
+ *
+ * @param avctx contains the current codec context
+ * @param gb pointer to the input bitstream
+ * @param ps pointer to the Parametric Stereo context
+ * @param par pointer to the parameter to be read
+ * @param e envelope to decode
+ * @param dt 1: time delta-coded, 0: frequency delta-coded
+ */
+// Generator macro for per-parameter readers. Decodes one envelope's worth
+// of delta-coded values: time deltas start from the same band of the
+// previous envelope, frequency deltas accumulate across bands. OFFSET
+// recentres the VLC output, MASK (if nonzero) wraps the value, and
+// ERR_CONDITION rejects out-of-range results.
+// NOTE: no comments inside the macro body — they would break the
+// backslash line continuations.
+#define READ_PAR_DATA(PAR, OFFSET, MASK, ERR_CONDITION) \
+static int read_ ## PAR ## _data(AVCodecContext *avctx, GetBitContext *gb, PSContext *ps, \
+ int8_t (*PAR)[PS_MAX_NR_IIDICC], int table_idx, int e, int dt) \
+{ \
+ int b, num = ps->nr_ ## PAR ## _par; \
+ VLC_TYPE (*vlc_table)[2] = vlc_ps[table_idx].table; \
+ if (dt) { \
+ int e_prev = e ? e - 1 : ps->num_env_old - 1; \
+ e_prev = FFMAX(e_prev, 0); \
+ for (b = 0; b < num; b++) { \
+ int val = PAR[e_prev][b] + get_vlc2(gb, vlc_table, 9, 3) - OFFSET; \
+ if (MASK) val &= MASK; \
+ PAR[e][b] = val; \
+ if (ERR_CONDITION) \
+ goto err; \
+ } \
+ } else { \
+ int val = 0; \
+ for (b = 0; b < num; b++) { \
+ val += get_vlc2(gb, vlc_table, 9, 3) - OFFSET; \
+ if (MASK) val &= MASK; \
+ PAR[e][b] = val; \
+ if (ERR_CONDITION) \
+ goto err; \
+ } \
+ } \
+ return 0; \
+err: \
+ av_log(avctx, AV_LOG_ERROR, "illegal "#PAR"\n"); \
+ return -1; \
+}
+
+// read_iid_data(): valid range widens with fine quantization (iid_quant).
+READ_PAR_DATA(iid, huff_offset[table_idx], 0, FFABS(ps->iid_par[e][b]) > 7 + 8 * ps->iid_quant)
+// read_icc_data(): ICC indices must be in [0, 7].
+READ_PAR_DATA(icc, huff_offset[table_idx], 0, ps->icc_par[e][b] > 7U)
+// read_ipdopd_data(): phase values wrap modulo 8; never errors.
+READ_PAR_DATA(ipdopd, 0, 0x07, 0)
+
+/**
+ * Parse one PS extension block (currently only extension id 0, which
+ * carries the IPD/OPD phase parameters).
+ *
+ * @param ps_extension_id id of the extension; nonzero ids are ignored
+ * @return number of bits consumed (0 for unknown extensions)
+ */
+static int ps_read_extension_data(GetBitContext *gb, PSContext *ps, int ps_extension_id)
+{
+ int e;
+ int count = get_bits_count(gb);
+
+ if (ps_extension_id)
+ return 0;
+
+ ps->enable_ipdopd = get_bits1(gb);
+ if (ps->enable_ipdopd) {
+ for (e = 0; e < ps->num_env; e++) {
+ // avctx is NULL here: read_ipdopd_data has no error condition,
+ // so its error path (which logs) is never taken.
+ int dt = get_bits1(gb);
+ read_ipdopd_data(NULL, gb, ps, ps->ipd_par, dt ? huff_ipd_dt : huff_ipd_df, e, dt);
+ dt = get_bits1(gb);
+ read_ipdopd_data(NULL, gb, ps, ps->opd_par, dt ? huff_opd_dt : huff_opd_df, e, dt);
+ }
+ }
+ skip_bits1(gb); //reserved_ps
+ return get_bits_count(gb) - count;
+}
+
+/**
+ * Clear the IPD/OPD phase-smoothing history, used when the band
+ * configuration changes (20 <-> 34 band switch).
+ */
+static void ipdopd_reset(int8_t *opd_hist, int8_t *ipd_hist)
+{
+ int i;
+ for (i = 0; i < PS_MAX_NR_IPDOPD; i++) {
+ opd_hist[i] = 0;
+ ipd_hist[i] = 0;
+ }
+}
+
+/**
+ * Read Parametric Stereo side data from the bitstream.
+ *
+ * Parses the optional PS header (IID/ICC modes), the envelope layout and
+ * the per-envelope IID/ICC (and, via extensions, IPD/OPD) parameters into
+ * ps. Parsing is done on a local copy of the bitreader; the host reader
+ * gb_host is only advanced at the end, by the number of bits actually
+ * consumed on success or by bits_left on any error (so the caller always
+ * stays in sync with the SBR extension payload).
+ *
+ * @param bits_left size of the PS payload in bits
+ * @return bits consumed on success, bits_left on error
+ */
+int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps, int bits_left)
+{
+ int e;
+ int bit_count_start = get_bits_count(gb_host);
+ int header;
+ int bits_consumed;
+ GetBitContext gbc = *gb_host, *gb = &gbc;
+
+ header = get_bits1(gb);
+ if (header) { //enable_ps_header
+ ps->enable_iid = get_bits1(gb);
+ if (ps->enable_iid) {
+ int iid_mode = get_bits(gb, 3);
+ if (iid_mode > 5) {
+ av_log(avctx, AV_LOG_ERROR, "iid_mode %d is reserved.\n",
+ iid_mode);
+ goto err;
+ }
+ ps->nr_iid_par = nr_iidicc_par_tab[iid_mode];
+ ps->iid_quant = iid_mode > 2;
+ ps->nr_ipdopd_par = nr_iidopd_par_tab[iid_mode];
+ }
+ ps->enable_icc = get_bits1(gb);
+ if (ps->enable_icc) {
+ ps->icc_mode = get_bits(gb, 3);
+ if (ps->icc_mode > 5) {
+ av_log(avctx, AV_LOG_ERROR, "icc_mode %d is reserved.\n",
+ ps->icc_mode);
+ goto err;
+ }
+ ps->nr_icc_par = nr_iidicc_par_tab[ps->icc_mode];
+ }
+ ps->enable_ext = get_bits1(gb);
+ }
+
+ ps->frame_class = get_bits1(gb);
+ ps->num_env_old = ps->num_env;
+ ps->num_env = num_env_tab[ps->frame_class][get_bits(gb, 2)];
+
+ // Envelope borders: explicit for frame_class 1, uniform otherwise.
+ ps->border_position[0] = -1;
+ if (ps->frame_class) {
+ for (e = 1; e <= ps->num_env; e++)
+ ps->border_position[e] = get_bits(gb, 5);
+ } else
+ for (e = 1; e <= ps->num_env; e++)
+ ps->border_position[e] = (e * numQMFSlots >> ff_log2_tab[ps->num_env]) - 1;
+
+ if (ps->enable_iid) {
+ for (e = 0; e < ps->num_env; e++) {
+ int dt = get_bits1(gb);
+ if (read_iid_data(avctx, gb, ps, ps->iid_par, huff_iid[2*dt+ps->iid_quant], e, dt))
+ goto err;
+ }
+ } else
+ memset(ps->iid_par, 0, sizeof(ps->iid_par));
+
+ if (ps->enable_icc)
+ for (e = 0; e < ps->num_env; e++) {
+ int dt = get_bits1(gb);
+ if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt))
+ goto err;
+ }
+ else
+ memset(ps->icc_par, 0, sizeof(ps->icc_par));
+
+ if (ps->enable_ext) {
+ // Extension payload: 4-bit length, escape-extended by 8 bits at 15.
+ int cnt = get_bits(gb, 4);
+ if (cnt == 15) {
+ cnt += get_bits(gb, 8);
+ }
+ cnt *= 8;
+ while (cnt > 7) {
+ int ps_extension_id = get_bits(gb, 2);
+ cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
+ }
+ if (cnt < 0) {
+ // Fix: message was missing its terminating newline.
+ av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d\n", cnt);
+ goto err;
+ }
+ skip_bits(gb, cnt);
+ }
+
+ ps->enable_ipdopd &= !PS_BASELINE;
+
+ //Fix up envelopes
+ if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) {
+ //Create a fake envelope
+ int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1;
+ if (source >= 0 && source != ps->num_env) {
+ if (ps->enable_iid) {
+ memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0]));
+ }
+ if (ps->enable_icc) {
+ memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0]));
+ }
+ if (ps->enable_ipdopd) {
+ memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0]));
+ memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0]));
+ }
+ }
+ ps->num_env++;
+ ps->border_position[ps->num_env] = numQMFSlots - 1;
+ }
+
+
+ ps->is34bands_old = ps->is34bands;
+ if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc))
+ ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) ||
+ (ps->enable_icc && ps->nr_icc_par == 34);
+
+ //Baseline
+ if (!ps->enable_ipdopd) {
+ memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
+ memset(ps->opd_par, 0, sizeof(ps->opd_par));
+ }
+
+ if (header)
+ ps->start = 1;
+
+ bits_consumed = get_bits_count(gb) - bit_count_start;
+ if (bits_consumed <= bits_left) {
+ skip_bits_long(gb_host, bits_consumed);
+ return bits_consumed;
+ }
+ av_log(avctx, AV_LOG_ERROR, "Expected to read %d PS bits actually read %d.\n", bits_left, bits_consumed);
+err:
+ ps->start = 0;
+ skip_bits_long(gb_host, bits_left);
+ return bits_left;
+}
+
+/** Split one subband into 2 subsubbands with a symmetric real filter.
+ * The filter must have its non-center even coefficients equal to zero.
+ * Only odd taps and the center tap contribute; the two outputs are the
+ * sum and difference of the in-phase and out-of-phase parts, with
+ * 'reverse' selecting which output slot gets which. */
+static void hybrid2_re(float (*in)[2], float (*out)[32][2], const float filter[7], int len, int reverse)
+{
+ int i, j;
+ for (i = 0; i < len; i++, in++) {
+ float re_in = filter[6] * in[6][0]; //real inphase
+ float re_op = 0.0f; //real out of phase
+ float im_in = filter[6] * in[6][1]; //imag inphase
+ float im_op = 0.0f; //imag out of phase
+ for (j = 0; j < 6; j += 2) {
+ re_op += filter[j+1] * (in[j+1][0] + in[12-j-1][0]);
+ im_op += filter[j+1] * (in[j+1][1] + in[12-j-1][1]);
+ }
+ out[ reverse][i][0] = re_in + re_op;
+ out[ reverse][i][1] = im_in + im_op;
+ out[!reverse][i][0] = re_in - re_op;
+ out[!reverse][i][1] = im_in - im_op;
+ }
+}
+
+/** Split one subband into 6 subsubbands with a complex filter.
+ * An 8-band complex filterbank is evaluated (exploiting the 13-tap
+ * filter's symmetry around tap 6), then the 8 outputs are folded down
+ * to 6 bands in the spectral order required by the 20-band PS mapping. */
+static void hybrid6_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int len)
+{
+ int i, j, ssb;
+ int N = 8;
+ float temp[8][2];
+
+ for (i = 0; i < len; i++, in++) {
+ for (ssb = 0; ssb < N; ssb++) {
+ float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
+ for (j = 0; j < 6; j++) {
+ float in0_re = in[j][0];
+ float in0_im = in[j][1];
+ float in1_re = in[12-j][0];
+ float in1_im = in[12-j][1];
+ sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
+ sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
+ }
+ temp[ssb][0] = sum_re;
+ temp[ssb][1] = sum_im;
+ }
+ // Reorder/merge the 8 filter outputs into 6 output bands:
+ // bands 4 and 5 each sum a pair of adjacent filter outputs.
+ out[0][i][0] = temp[6][0];
+ out[0][i][1] = temp[6][1];
+ out[1][i][0] = temp[7][0];
+ out[1][i][1] = temp[7][1];
+ out[2][i][0] = temp[0][0];
+ out[2][i][1] = temp[0][1];
+ out[3][i][0] = temp[1][0];
+ out[3][i][1] = temp[1][1];
+ out[4][i][0] = temp[2][0] + temp[5][0];
+ out[4][i][1] = temp[2][1] + temp[5][1];
+ out[5][i][0] = temp[3][0] + temp[4][0];
+ out[5][i][1] = temp[3][1] + temp[4][1];
+ }
+}
+
+/** Split one subband into N (4, 8 or 12) subsubbands with a complex
+ * filter, writing the filter outputs directly in order (used by the
+ * 34-band hybrid analysis). Same symmetric 13-tap evaluation as
+ * hybrid6_cx but without output reordering. */
+static void hybrid4_8_12_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int N, int len)
+{
+ int i, j, ssb;
+
+ for (i = 0; i < len; i++, in++) {
+ for (ssb = 0; ssb < N; ssb++) {
+ float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
+ for (j = 0; j < 6; j++) {
+ float in0_re = in[j][0];
+ float in0_im = in[j][1];
+ float in1_re = in[12-j][0];
+ float in1_im = in[12-j][1];
+ sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
+ sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
+ }
+ out[ssb][i][0] = sum_re;
+ out[ssb][i][1] = sum_im;
+ }
+ }
+}
+
+/** Hybrid analysis: refine the low QMF subbands of L into the 91-band
+ * (is34) or 71-band hybrid domain in 'out'. The first few QMF bands are
+ * split by the complex/real sub-filterbanks; the remaining bands are
+ * copied through unchanged. 'in' doubles as a 6-sample overlap buffer
+ * carried between calls. */
+static void hybrid_analysis(float out[91][32][2], float in[5][44][2], float L[2][38][64], int is34, int len)
+{
+ int i, j;
+ // Append the new frame's low subbands after the 6-sample history.
+ for (i = 0; i < 5; i++) {
+ for (j = 0; j < 38; j++) {
+ in[i][j+6][0] = L[0][j][i];
+ in[i][j+6][1] = L[1][j][i];
+ }
+ }
+ if (is34) {
+ hybrid4_8_12_cx(in[0], out, f34_0_12, 12, len);
+ hybrid4_8_12_cx(in[1], out+12, f34_1_8, 8, len);
+ hybrid4_8_12_cx(in[2], out+20, f34_2_4, 4, len);
+ hybrid4_8_12_cx(in[3], out+24, f34_2_4, 4, len);
+ hybrid4_8_12_cx(in[4], out+28, f34_2_4, 4, len);
+ // QMF bands 5..63 pass through as hybrid bands 32..90.
+ for (i = 0; i < 59; i++) {
+ for (j = 0; j < len; j++) {
+ out[i+32][j][0] = L[0][j][i+5];
+ out[i+32][j][1] = L[1][j][i+5];
+ }
+ }
+ } else {
+ hybrid6_cx(in[0], out, f20_0_8, len);
+ hybrid2_re(in[1], out+6, g1_Q2, len, 1);
+ hybrid2_re(in[2], out+8, g1_Q2, len, 0);
+ // QMF bands 3..63 pass through as hybrid bands 10..70.
+ for (i = 0; i < 61; i++) {
+ for (j = 0; j < len; j++) {
+ out[i+10][j][0] = L[0][j][i+3];
+ out[i+10][j][1] = L[1][j][i+3];
+ }
+ }
+ }
+ //update in_buf
+ for (i = 0; i < 5; i++) {
+ memcpy(in[i], in[i]+32, 6 * sizeof(in[i][0]));
+ }
+}
+
+/** Hybrid synthesis: collapse the hybrid bands back into QMF subbands by
+ * summing each group of subsubbands into its parent band and copying the
+ * pass-through bands. Inverse of the grouping done in hybrid_analysis. */
+static void hybrid_synthesis(float out[2][38][64], float in[91][32][2], int is34, int len)
+{
+ int i, n;
+ if (is34) {
+ // Bands 0-4 are rebuilt from 12+8+4+4+4 subsubbands respectively.
+ for (n = 0; n < len; n++) {
+ memset(out[0][n], 0, 5*sizeof(out[0][n][0]));
+ memset(out[1][n], 0, 5*sizeof(out[1][n][0]));
+ for (i = 0; i < 12; i++) {
+ out[0][n][0] += in[ i][n][0];
+ out[1][n][0] += in[ i][n][1];
+ }
+ for (i = 0; i < 8; i++) {
+ out[0][n][1] += in[12+i][n][0];
+ out[1][n][1] += in[12+i][n][1];
+ }
+ for (i = 0; i < 4; i++) {
+ out[0][n][2] += in[20+i][n][0];
+ out[1][n][2] += in[20+i][n][1];
+ out[0][n][3] += in[24+i][n][0];
+ out[1][n][3] += in[24+i][n][1];
+ out[0][n][4] += in[28+i][n][0];
+ out[1][n][4] += in[28+i][n][1];
+ }
+ }
+ for (i = 0; i < 59; i++) {
+ for (n = 0; n < len; n++) {
+ out[0][n][i+5] = in[i+32][n][0];
+ out[1][n][i+5] = in[i+32][n][1];
+ }
+ }
+ } else {
+ // Bands 0-2 are rebuilt from 6+2+2 subsubbands respectively.
+ for (n = 0; n < len; n++) {
+ out[0][n][0] = in[0][n][0] + in[1][n][0] + in[2][n][0] +
+ in[3][n][0] + in[4][n][0] + in[5][n][0];
+ out[1][n][0] = in[0][n][1] + in[1][n][1] + in[2][n][1] +
+ in[3][n][1] + in[4][n][1] + in[5][n][1];
+ out[0][n][1] = in[6][n][0] + in[7][n][0];
+ out[1][n][1] = in[6][n][1] + in[7][n][1];
+ out[0][n][2] = in[8][n][0] + in[9][n][0];
+ out[1][n][2] = in[8][n][1] + in[9][n][1];
+ }
+ for (i = 0; i < 61; i++) {
+ for (n = 0; n < len; n++) {
+ out[0][n][i+3] = in[i+10][n][0];
+ out[1][n][i+3] = in[i+10][n][1];
+ }
+ }
+ }
+}
+
+/// All-pass filter decay slope
+#define DECAY_SLOPE 0.05f
+/// Number of frequency bands that can be addressed by the parameter index, b(k)
+static const int NR_PAR_BANDS[] = { 20, 34 };
+/// Number of frequency bands that can be addressed by the sub subband index, k
+static const int NR_BANDS[] = { 71, 91 };
+/// Start frequency band for the all-pass filter decay slope
+static const int DECAY_CUTOFF[] = { 10, 32 };
+/// Number of all-pass filer bands
+static const int NR_ALLPASS_BANDS[] = { 30, 50 };
+/// First stereo band using the short one sample delay
+static const int SHORT_DELAY_BAND[] = { 42, 62 };
+
+/** Table 8.46 */
+/** Expand 10 parameter-band indices to 20 by duplicating each value into
+ * a pair of bands; in partial mode (full == 0) only the low 11 bands are
+ * written, with band 10 zeroed. */
+static void map_idx_10_to_20(int8_t *par_mapped, const int8_t *par, int full)
+{
+ int b;
+ if (full)
+ b = 9;
+ else {
+ b = 4;
+ par_mapped[10] = 0;
+ }
+ for (; b >= 0; b--) {
+ par_mapped[2*b+1] = par_mapped[2*b] = par[b];
+ }
+}
+
+/** Reduce 34 parameter-band indices to 20 by weighted/averaged merging of
+ * neighbouring bands (integer arithmetic); when full == 0 only the low
+ * 11 output bands are written. */
+static void map_idx_34_to_20(int8_t *par_mapped, const int8_t *par, int full)
+{
+ par_mapped[ 0] = (2*par[ 0] + par[ 1]) / 3;
+ par_mapped[ 1] = ( par[ 1] + 2*par[ 2]) / 3;
+ par_mapped[ 2] = (2*par[ 3] + par[ 4]) / 3;
+ par_mapped[ 3] = ( par[ 4] + 2*par[ 5]) / 3;
+ par_mapped[ 4] = ( par[ 6] + par[ 7]) / 2;
+ par_mapped[ 5] = ( par[ 8] + par[ 9]) / 2;
+ par_mapped[ 6] = par[10];
+ par_mapped[ 7] = par[11];
+ par_mapped[ 8] = ( par[12] + par[13]) / 2;
+ par_mapped[ 9] = ( par[14] + par[15]) / 2;
+ par_mapped[10] = par[16];
+ if (full) {
+ par_mapped[11] = par[17];
+ par_mapped[12] = par[18];
+ par_mapped[13] = par[19];
+ par_mapped[14] = ( par[20] + par[21]) / 2;
+ par_mapped[15] = ( par[22] + par[23]) / 2;
+ par_mapped[16] = ( par[24] + par[25]) / 2;
+ par_mapped[17] = ( par[26] + par[27]) / 2;
+ par_mapped[18] = ( par[28] + par[29] + par[30] + par[31]) / 4;
+ par_mapped[19] = ( par[32] + par[33]) / 2;
+ }
+}
+
+/** In-place float version of map_idx_34_to_20: reduce 34 smoothed filter
+ * values to 20 using the same band-merging weights (always full). */
+static void map_val_34_to_20(float par[PS_MAX_NR_IIDICC])
+{
+ par[ 0] = (2*par[ 0] + par[ 1]) * 0.33333333f;
+ par[ 1] = ( par[ 1] + 2*par[ 2]) * 0.33333333f;
+ par[ 2] = (2*par[ 3] + par[ 4]) * 0.33333333f;
+ par[ 3] = ( par[ 4] + 2*par[ 5]) * 0.33333333f;
+ par[ 4] = ( par[ 6] + par[ 7]) * 0.5f;
+ par[ 5] = ( par[ 8] + par[ 9]) * 0.5f;
+ par[ 6] = par[10];
+ par[ 7] = par[11];
+ par[ 8] = ( par[12] + par[13]) * 0.5f;
+ par[ 9] = ( par[14] + par[15]) * 0.5f;
+ par[10] = par[16];
+ par[11] = par[17];
+ par[12] = par[18];
+ par[13] = par[19];
+ par[14] = ( par[20] + par[21]) * 0.5f;
+ par[15] = ( par[22] + par[23]) * 0.5f;
+ par[16] = ( par[24] + par[25]) * 0.5f;
+ par[17] = ( par[26] + par[27]) * 0.5f;
+ par[18] = ( par[28] + par[29] + par[30] + par[31]) * 0.25f;
+ par[19] = ( par[32] + par[33]) * 0.5f;
+}
+
+/** Expand 10 parameter-band indices to 34 by replicating each source band
+ * across its group of output bands, written high-to-low so the input may
+ * alias the output; partial mode (full == 0) writes only bands 0-16,
+ * zeroing band 16. */
+static void map_idx_10_to_34(int8_t *par_mapped, const int8_t *par, int full)
+{
+ if (full) {
+ par_mapped[33] = par[9];
+ par_mapped[32] = par[9];
+ par_mapped[31] = par[9];
+ par_mapped[30] = par[9];
+ par_mapped[29] = par[9];
+ par_mapped[28] = par[9];
+ par_mapped[27] = par[8];
+ par_mapped[26] = par[8];
+ par_mapped[25] = par[8];
+ par_mapped[24] = par[8];
+ par_mapped[23] = par[7];
+ par_mapped[22] = par[7];
+ par_mapped[21] = par[7];
+ par_mapped[20] = par[7];
+ par_mapped[19] = par[6];
+ par_mapped[18] = par[6];
+ par_mapped[17] = par[5];
+ par_mapped[16] = par[5];
+ } else {
+ par_mapped[16] = 0;
+ }
+ par_mapped[15] = par[4];
+ par_mapped[14] = par[4];
+ par_mapped[13] = par[4];
+ par_mapped[12] = par[4];
+ par_mapped[11] = par[3];
+ par_mapped[10] = par[3];
+ par_mapped[ 9] = par[2];
+ par_mapped[ 8] = par[2];
+ par_mapped[ 7] = par[2];
+ par_mapped[ 6] = par[2];
+ par_mapped[ 5] = par[1];
+ par_mapped[ 4] = par[1];
+ par_mapped[ 3] = par[1];
+ par_mapped[ 2] = par[0];
+ par_mapped[ 1] = par[0];
+ par_mapped[ 0] = par[0];
+}
+
+/** Expand 20 parameter-band indices to 34: mostly replication, with two
+ * averaged transition bands (1 and 4); written high-to-low so the input
+ * may alias the output. Partial mode (full == 0) writes bands 0-16 only. */
+static void map_idx_20_to_34(int8_t *par_mapped, const int8_t *par, int full)
+{
+ if (full) {
+ par_mapped[33] = par[19];
+ par_mapped[32] = par[19];
+ par_mapped[31] = par[18];
+ par_mapped[30] = par[18];
+ par_mapped[29] = par[18];
+ par_mapped[28] = par[18];
+ par_mapped[27] = par[17];
+ par_mapped[26] = par[17];
+ par_mapped[25] = par[16];
+ par_mapped[24] = par[16];
+ par_mapped[23] = par[15];
+ par_mapped[22] = par[15];
+ par_mapped[21] = par[14];
+ par_mapped[20] = par[14];
+ par_mapped[19] = par[13];
+ par_mapped[18] = par[12];
+ par_mapped[17] = par[11];
+ }
+ par_mapped[16] = par[10];
+ par_mapped[15] = par[ 9];
+ par_mapped[14] = par[ 9];
+ par_mapped[13] = par[ 8];
+ par_mapped[12] = par[ 8];
+ par_mapped[11] = par[ 7];
+ par_mapped[10] = par[ 6];
+ par_mapped[ 9] = par[ 5];
+ par_mapped[ 8] = par[ 5];
+ par_mapped[ 7] = par[ 4];
+ par_mapped[ 6] = par[ 4];
+ par_mapped[ 5] = par[ 3];
+ par_mapped[ 4] = (par[ 2] + par[ 3]) / 2;
+ par_mapped[ 3] = par[ 2];
+ par_mapped[ 2] = par[ 1];
+ par_mapped[ 1] = (par[ 0] + par[ 1]) / 2;
+ par_mapped[ 0] = par[ 0];
+}
+
+/** In-place float version of map_idx_20_to_34 for the smoothed filter
+ * values; high-to-low order makes the in-place expansion safe. The final
+ * par[0] self-assignment is a deliberate no-op kept for symmetry. */
+static void map_val_20_to_34(float par[PS_MAX_NR_IIDICC])
+{
+ par[33] = par[19];
+ par[32] = par[19];
+ par[31] = par[18];
+ par[30] = par[18];
+ par[29] = par[18];
+ par[28] = par[18];
+ par[27] = par[17];
+ par[26] = par[17];
+ par[25] = par[16];
+ par[24] = par[16];
+ par[23] = par[15];
+ par[22] = par[15];
+ par[21] = par[14];
+ par[20] = par[14];
+ par[19] = par[13];
+ par[18] = par[12];
+ par[17] = par[11];
+ par[16] = par[10];
+ par[15] = par[ 9];
+ par[14] = par[ 9];
+ par[13] = par[ 8];
+ par[12] = par[ 8];
+ par[11] = par[ 7];
+ par[10] = par[ 6];
+ par[ 9] = par[ 5];
+ par[ 8] = par[ 5];
+ par[ 7] = par[ 4];
+ par[ 6] = par[ 4];
+ par[ 5] = par[ 3];
+ par[ 4] = (par[ 2] + par[ 3]) * 0.5f;
+ par[ 3] = par[ 2];
+ par[ 2] = par[ 1];
+ par[ 1] = (par[ 0] + par[ 1]) * 0.5f;
+ par[ 0] = par[ 0];
+}
+
+/** Generate the decorrelated (right-channel seed) signal 'out' from the
+ * hybrid-domain downmix 's': per-band transient detection followed by an
+ * allpass decorrelation filter on the low bands and plain delays on the
+ * higher bands. All history buffers live in ps and are reset when the
+ * band configuration (20/34) changes. */
+static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[32][2], int is34)
+{
+ float power[34][PS_QMF_TIME_SLOTS] = {{0}};
+ float transient_gain[34][PS_QMF_TIME_SLOTS];
+ float *peak_decay_nrg = ps->peak_decay_nrg;
+ float *power_smooth = ps->power_smooth;
+ float *peak_decay_diff_smooth = ps->peak_decay_diff_smooth;
+ float (*delay)[PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2] = ps->delay;
+ float (*ap_delay)[PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2] = ps->ap_delay;
+ const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20;
+ const float peak_decay_factor = 0.76592833836465f;
+ const float transient_impact = 1.5f;
+ const float a_smooth = 0.25f; //< Smoothing coefficient
+ int i, k, m, n;
+ int n0 = 0, nL = 32;
+ static const int link_delay[] = { 3, 4, 5 };
+ static const float a[] = { 0.65143905753106f,
+ 0.56471812200776f,
+ 0.48954165955695f };
+
+ // Band layout changed: all carried-over filter state is invalid.
+ if (is34 != ps->is34bands_old) {
+ memset(ps->peak_decay_nrg, 0, sizeof(ps->peak_decay_nrg));
+ memset(ps->power_smooth, 0, sizeof(ps->power_smooth));
+ memset(ps->peak_decay_diff_smooth, 0, sizeof(ps->peak_decay_diff_smooth));
+ memset(ps->delay, 0, sizeof(ps->delay));
+ memset(ps->ap_delay, 0, sizeof(ps->ap_delay));
+ }
+
+ // Accumulate per-parameter-band power |s|^2 over all hybrid bands.
+ for (n = n0; n < nL; n++) {
+ for (k = 0; k < NR_BANDS[is34]; k++) {
+ int i = k_to_i[k];
+ power[i][n] += s[k][n][0] * s[k][n][0] + s[k][n][1] * s[k][n][1];
+ }
+ }
+
+ //Transient detection
+ for (i = 0; i < NR_PAR_BANDS[is34]; i++) {
+ for (n = n0; n < nL; n++) {
+ float decayed_peak = peak_decay_factor * peak_decay_nrg[i];
+ float denom;
+ peak_decay_nrg[i] = FFMAX(decayed_peak, power[i][n]);
+ power_smooth[i] += a_smooth * (power[i][n] - power_smooth[i]);
+ peak_decay_diff_smooth[i] += a_smooth * (peak_decay_nrg[i] - power[i][n] - peak_decay_diff_smooth[i]);
+ denom = transient_impact * peak_decay_diff_smooth[i];
+ transient_gain[i][n] = (denom > power_smooth[i]) ?
+ power_smooth[i] / denom : 1.0f;
+ }
+ }
+
+ //Decorrelation and transient reduction
+ // PS_AP_LINKS - 1
+ // -----
+ // | | Q_fract_allpass[k][m]*z^-link_delay[m] - a[m]*g_decay_slope[k]
+ //H[k][z] = z^-2 * phi_fract[k] * | | ----------------------------------------------------------------
+ // | | 1 - a[m]*g_decay_slope[k]*Q_fract_allpass[k][m]*z^-link_delay[m]
+ // m = 0
+ //d[k][z] (out) = transient_gain_mapped[k][z] * H[k][z] * s[k][z]
+ for (k = 0; k < NR_ALLPASS_BANDS[is34]; k++) {
+ int b = k_to_i[k];
+ float g_decay_slope = 1.f - DECAY_SLOPE * (k - DECAY_CUTOFF[is34]);
+ float ag[PS_AP_LINKS];
+ g_decay_slope = av_clipf(g_decay_slope, 0.f, 1.f);
+ memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
+ memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
+ for (m = 0; m < PS_AP_LINKS; m++) {
+ memcpy(ap_delay[k][m], ap_delay[k][m]+numQMFSlots, 5*sizeof(ap_delay[k][m][0]));
+ ag[m] = a[m] * g_decay_slope;
+ }
+ for (n = n0; n < nL; n++) {
+ // Two-slot delay plus per-band fractional phase rotation.
+ float in_re = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][0] -
+ delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][1];
+ float in_im = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][1] +
+ delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][0];
+ for (m = 0; m < PS_AP_LINKS; m++) {
+ float a_re = ag[m] * in_re;
+ float a_im = ag[m] * in_im;
+ float link_delay_re = ap_delay[k][m][n+5-link_delay[m]][0];
+ float link_delay_im = ap_delay[k][m][n+5-link_delay[m]][1];
+ float fractional_delay_re = Q_fract_allpass[is34][k][m][0];
+ float fractional_delay_im = Q_fract_allpass[is34][k][m][1];
+ ap_delay[k][m][n+5][0] = in_re;
+ ap_delay[k][m][n+5][1] = in_im;
+ in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im - a_re;
+ in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re - a_im;
+ ap_delay[k][m][n+5][0] += ag[m] * in_re;
+ ap_delay[k][m][n+5][1] += ag[m] * in_im;
+ }
+ out[k][n][0] = transient_gain[b][n] * in_re;
+ out[k][n][1] = transient_gain[b][n] * in_im;
+ }
+ }
+ for (; k < SHORT_DELAY_BAND[is34]; k++) {
+ memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
+ memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
+ for (n = n0; n < nL; n++) {
+ //H = delay 14
+ out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][0];
+ out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][1];
+ }
+ }
+ for (; k < NR_BANDS[is34]; k++) {
+ memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
+ memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
+ for (n = n0; n < nL; n++) {
+ //H = delay 1
+ out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][0];
+ out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][1];
+ }
+ }
+}
+
+/** Map decoded parameters onto the 34-band layout. 20/11-band and
+ * 10/5-band sources are expanded into the caller-provided buffer;
+ * 34/17-band sources are used as-is by repointing *p_par_mapped at the
+ * original array (no copy). */
+static void remap34(int8_t (**p_par_mapped)[PS_MAX_NR_IIDICC],
+ int8_t (*par)[PS_MAX_NR_IIDICC],
+ int num_par, int num_env, int full)
+{
+ int8_t (*par_mapped)[PS_MAX_NR_IIDICC] = *p_par_mapped;
+ int e;
+ if (num_par == 20 || num_par == 11) {
+ for (e = 0; e < num_env; e++) {
+ map_idx_20_to_34(par_mapped[e], par[e], full);
+ }
+ } else if (num_par == 10 || num_par == 5) {
+ for (e = 0; e < num_env; e++) {
+ map_idx_10_to_34(par_mapped[e], par[e], full);
+ }
+ } else {
+ *p_par_mapped = par;
+ }
+}
+
+/** Map decoded parameters onto the 20-band layout. 34/17-band sources are
+ * reduced and 10/5-band sources expanded into the caller's buffer;
+ * 20/11-band sources are used directly via *p_par_mapped. */
+static void remap20(int8_t (**p_par_mapped)[PS_MAX_NR_IIDICC],
+ int8_t (*par)[PS_MAX_NR_IIDICC],
+ int num_par, int num_env, int full)
+{
+ int8_t (*par_mapped)[PS_MAX_NR_IIDICC] = *p_par_mapped;
+ int e;
+ if (num_par == 34 || num_par == 17) {
+ for (e = 0; e < num_env; e++) {
+ map_idx_34_to_20(par_mapped[e], par[e], full);
+ }
+ } else if (num_par == 10 || num_par == 5) {
+ for (e = 0; e < num_env; e++) {
+ map_idx_10_to_20(par_mapped[e], par[e], full);
+ }
+ } else {
+ *p_par_mapped = par;
+ }
+}
+
+/** Apply the PS stereo mixing: turn the downmix 'l' and decorrelated
+ * signal 'r' into left/right channels in place. Per envelope, per
+ * parameter band, a 2x2 mixing matrix (h11 h12; h21 h22) is looked up
+ * from IID/ICC (optionally phase-rotated by IPD/OPD) and linearly
+ * interpolated across the envelope's time slots. */
+static void stereo_processing(PSContext *ps, float (*l)[32][2], float (*r)[32][2], int is34)
+{
+ int e, b, k, n;
+
+ // H buffers: [0]=real, [1]=imag parts; envelope index 0 holds the
+ // previous frame's final values for interpolation continuity.
+ float (*H11)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H11;
+ float (*H12)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H12;
+ float (*H21)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H21;
+ float (*H22)[PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC] = ps->H22;
+ int8_t *opd_hist = ps->opd_hist;
+ int8_t *ipd_hist = ps->ipd_hist;
+ int8_t iid_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC];
+ int8_t icc_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC];
+ int8_t ipd_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC];
+ int8_t opd_mapped_buf[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC];
+ int8_t (*iid_mapped)[PS_MAX_NR_IIDICC] = iid_mapped_buf;
+ int8_t (*icc_mapped)[PS_MAX_NR_IIDICC] = icc_mapped_buf;
+ int8_t (*ipd_mapped)[PS_MAX_NR_IIDICC] = ipd_mapped_buf;
+ int8_t (*opd_mapped)[PS_MAX_NR_IIDICC] = opd_mapped_buf;
+ const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20;
+ const float (*H_LUT)[8][4] = (PS_BASELINE || ps->icc_mode < 3) ? HA : HB;
+
+ //Remapping
+ // Carry last frame's final H values into envelope slot 0.
+ memcpy(H11[0][0], H11[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[0][0][0]));
+ memcpy(H11[1][0], H11[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H11[1][0][0]));
+ memcpy(H12[0][0], H12[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[0][0][0]));
+ memcpy(H12[1][0], H12[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H12[1][0][0]));
+ memcpy(H21[0][0], H21[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[0][0][0]));
+ memcpy(H21[1][0], H21[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H21[1][0][0]));
+ memcpy(H22[0][0], H22[0][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[0][0][0]));
+ memcpy(H22[1][0], H22[1][ps->num_env_old], PS_MAX_NR_IIDICC*sizeof(H22[1][0][0]));
+ if (is34) {
+ remap34(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1);
+ remap34(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1);
+ if (ps->enable_ipdopd) {
+ remap34(&ipd_mapped, ps->ipd_par, ps->nr_ipdopd_par, ps->num_env, 0);
+ remap34(&opd_mapped, ps->opd_par, ps->nr_ipdopd_par, ps->num_env, 0);
+ }
+ // Band count changed since last frame: convert the carried-over
+ // H values and restart phase smoothing.
+ if (!ps->is34bands_old) {
+ map_val_20_to_34(H11[0][0]);
+ map_val_20_to_34(H11[1][0]);
+ map_val_20_to_34(H12[0][0]);
+ map_val_20_to_34(H12[1][0]);
+ map_val_20_to_34(H21[0][0]);
+ map_val_20_to_34(H21[1][0]);
+ map_val_20_to_34(H22[0][0]);
+ map_val_20_to_34(H22[1][0]);
+ ipdopd_reset(ipd_hist, opd_hist);
+ }
+ } else {
+ remap20(&iid_mapped, ps->iid_par, ps->nr_iid_par, ps->num_env, 1);
+ remap20(&icc_mapped, ps->icc_par, ps->nr_icc_par, ps->num_env, 1);
+ if (ps->enable_ipdopd) {
+ remap20(&ipd_mapped, ps->ipd_par, ps->nr_ipdopd_par, ps->num_env, 0);
+ remap20(&opd_mapped, ps->opd_par, ps->nr_ipdopd_par, ps->num_env, 0);
+ }
+ if (ps->is34bands_old) {
+ map_val_34_to_20(H11[0][0]);
+ map_val_34_to_20(H11[1][0]);
+ map_val_34_to_20(H12[0][0]);
+ map_val_34_to_20(H12[1][0]);
+ map_val_34_to_20(H21[0][0]);
+ map_val_34_to_20(H21[1][0]);
+ map_val_34_to_20(H22[0][0]);
+ map_val_34_to_20(H22[1][0]);
+ ipdopd_reset(ipd_hist, opd_hist);
+ }
+ }
+
+ //Mixing
+ for (e = 0; e < ps->num_env; e++) {
+ for (b = 0; b < NR_PAR_BANDS[is34]; b++) {
+ float h11, h12, h21, h22;
+ h11 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][0];
+ h12 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][1];
+ h21 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][2];
+ h22 = H_LUT[iid_mapped[e][b] + 7 + 23 * ps->iid_quant][icc_mapped[e][b]][3];
+ if (!PS_BASELINE && ps->enable_ipdopd && b < ps->nr_ipdopd_par) {
+ //The spec say says to only run this smoother when enable_ipdopd
+ //is set but the reference decoder appears to run it constantly
+ float h11i, h12i, h21i, h22i;
+ float ipd_adj_re, ipd_adj_im;
+ int opd_idx = opd_hist[b] * 8 + opd_mapped[e][b];
+ int ipd_idx = ipd_hist[b] * 8 + ipd_mapped[e][b];
+ float opd_re = pd_re_smooth[opd_idx];
+ float opd_im = pd_im_smooth[opd_idx];
+ float ipd_re = pd_re_smooth[ipd_idx];
+ float ipd_im = pd_im_smooth[ipd_idx];
+ opd_hist[b] = opd_idx & 0x3F;
+ ipd_hist[b] = ipd_idx & 0x3F;
+
+ // Rotate the matrix entries by the smoothed OPD/IPD phases.
+ ipd_adj_re = opd_re*ipd_re + opd_im*ipd_im;
+ ipd_adj_im = opd_im*ipd_re - opd_re*ipd_im;
+ h11i = h11 * opd_im;
+ h11 = h11 * opd_re;
+ h12i = h12 * ipd_adj_im;
+ h12 = h12 * ipd_adj_re;
+ h21i = h21 * opd_im;
+ h21 = h21 * opd_re;
+ h22i = h22 * ipd_adj_im;
+ h22 = h22 * ipd_adj_re;
+ H11[1][e+1][b] = h11i;
+ H12[1][e+1][b] = h12i;
+ H21[1][e+1][b] = h21i;
+ H22[1][e+1][b] = h22i;
+ }
+ H11[0][e+1][b] = h11;
+ H12[0][e+1][b] = h12;
+ H21[0][e+1][b] = h21;
+ H22[0][e+1][b] = h22;
+ }
+ for (k = 0; k < NR_BANDS[is34]; k++) {
+ float h11r, h12r, h21r, h22r;
+ float h11i, h12i, h21i, h22i;
+ float h11r_step, h12r_step, h21r_step, h22r_step;
+ float h11i_step, h12i_step, h21i_step, h22i_step;
+ int start = ps->border_position[e];
+ int stop = ps->border_position[e+1];
+ float width = 1.f / (stop - start);
+ b = k_to_i[k];
+ h11r = H11[0][e][b];
+ h12r = H12[0][e][b];
+ h21r = H21[0][e][b];
+ h22r = H22[0][e][b];
+ if (!PS_BASELINE && ps->enable_ipdopd) {
+ //Is this necessary? ps_04_new seems unchanged
+ if ((is34 && k <= 13 && k >= 9) || (!is34 && k <= 1)) {
+ h11i = -H11[1][e][b];
+ h12i = -H12[1][e][b];
+ h21i = -H21[1][e][b];
+ h22i = -H22[1][e][b];
+ } else {
+ h11i = H11[1][e][b];
+ h12i = H12[1][e][b];
+ h21i = H21[1][e][b];
+ h22i = H22[1][e][b];
+ }
+ }
+ //Interpolation
+ h11r_step = (H11[0][e+1][b] - h11r) * width;
+ h12r_step = (H12[0][e+1][b] - h12r) * width;
+ h21r_step = (H21[0][e+1][b] - h21r) * width;
+ h22r_step = (H22[0][e+1][b] - h22r) * width;
+ if (!PS_BASELINE && ps->enable_ipdopd) {
+ h11i_step = (H11[1][e+1][b] - h11i) * width;
+ h12i_step = (H12[1][e+1][b] - h12i) * width;
+ h21i_step = (H21[1][e+1][b] - h21i) * width;
+ h22i_step = (H22[1][e+1][b] - h22i) * width;
+ }
+ for (n = start + 1; n <= stop; n++) {
+ //l is s, r is d
+ float l_re = l[k][n][0];
+ float l_im = l[k][n][1];
+ float r_re = r[k][n][0];
+ float r_im = r[k][n][1];
+ h11r += h11r_step;
+ h12r += h12r_step;
+ h21r += h21r_step;
+ h22r += h22r_step;
+ if (!PS_BASELINE && ps->enable_ipdopd) {
+ h11i += h11i_step;
+ h12i += h12i_step;
+ h21i += h21i_step;
+ h22i += h22i_step;
+
+ l[k][n][0] = h11r*l_re + h21r*r_re - h11i*l_im - h21i*r_im;
+ l[k][n][1] = h11r*l_im + h21r*r_im + h11i*l_re + h21i*r_re;
+ r[k][n][0] = h12r*l_re + h22r*r_re - h12i*l_im - h22i*r_im;
+ r[k][n][1] = h12r*l_im + h22r*r_im + h12i*l_re + h22i*r_re;
+ } else {
+ l[k][n][0] = h11r*l_re + h21r*r_re;
+ l[k][n][1] = h11r*l_im + h21r*r_im;
+ r[k][n][0] = h12r*l_re + h22r*r_re;
+ r[k][n][1] = h12r*l_im + h22r*r_im;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Top-level PS synthesis for one frame: hybrid-analyze the QMF downmix L,
+ * derive the decorrelated signal, mix into stereo, and synthesize both
+ * channels back to the QMF domain in L and R.
+ *
+ * @param top highest processed QMF subband; state above it is cleared
+ * @return always 0
+ */
+int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float R[2][38][64], int top)
+{
+ float Lbuf[91][32][2];
+ float Rbuf[91][32][2];
+ const int len = 32;
+ int is34 = ps->is34bands;
+
+ // Translate the QMF band limit into a hybrid band index and zero the
+ // delay state of any bands above it.
+ top += NR_BANDS[is34] - 64;
+ memset(ps->delay+top, 0, (NR_BANDS[is34] - top)*sizeof(ps->delay[0]));
+ if (top < NR_ALLPASS_BANDS[is34])
+ memset(ps->ap_delay + top, 0, (NR_ALLPASS_BANDS[is34] - top)*sizeof(ps->ap_delay[0]));
+
+ hybrid_analysis(Lbuf, ps->in_buf, L, is34, len);
+ decorrelation(ps, Rbuf, Lbuf, is34);
+ stereo_processing(ps, Lbuf, Rbuf, is34);
+ hybrid_synthesis(L, Lbuf, is34, len);
+ hybrid_synthesis(R, Rbuf, is34, len);
+
+ return 0;
+}
+
+// Build one static VLC from the ps_tmp[] row for codebook 'num', with a
+// precomputed static table of 'size' entries.
+#define PS_INIT_VLC_STATIC(num, size) \
+ INIT_VLC_STATIC(&vlc_ps[num], 9, ps_tmp[num].table_size / ps_tmp[num].elem_size, \
+ ps_tmp[num].ps_bits, 1, 1, \
+ ps_tmp[num].ps_codes, ps_tmp[num].elem_size, ps_tmp[num].elem_size, \
+ size);
+
+// Describe one codebook from its <name>_codes/<name>_bits arrays.
+#define PS_VLC_ROW(name) \
+ { name ## _codes, name ## _bits, sizeof(name ## _codes), sizeof(name ## _codes[0]) }
+
+/**
+ * One-time global initialization: build the ten parameter VLCs (rows must
+ * match the order of the codebook enum) and the PS lookup tables.
+ */
+av_cold void ff_ps_init(void) {
+ // Syntax initialization
+ static const struct {
+ const void *ps_codes, *ps_bits;
+ const unsigned int table_size, elem_size;
+ } ps_tmp[] = {
+ PS_VLC_ROW(huff_iid_df1),
+ PS_VLC_ROW(huff_iid_dt1),
+ PS_VLC_ROW(huff_iid_df0),
+ PS_VLC_ROW(huff_iid_dt0),
+ PS_VLC_ROW(huff_icc_df),
+ PS_VLC_ROW(huff_icc_dt),
+ PS_VLC_ROW(huff_ipd_df),
+ PS_VLC_ROW(huff_ipd_dt),
+ PS_VLC_ROW(huff_opd_df),
+ PS_VLC_ROW(huff_opd_dt),
+ };
+
+ PS_INIT_VLC_STATIC(0, 1544);
+ PS_INIT_VLC_STATIC(1, 832);
+ PS_INIT_VLC_STATIC(2, 1024);
+ PS_INIT_VLC_STATIC(3, 1036);
+ PS_INIT_VLC_STATIC(4, 544);
+ PS_INIT_VLC_STATIC(5, 544);
+ PS_INIT_VLC_STATIC(6, 512);
+ PS_INIT_VLC_STATIC(7, 512);
+ PS_INIT_VLC_STATIC(8, 512);
+ PS_INIT_VLC_STATIC(9, 512);
+
+ ps_tableinit();
+}
+
+/**
+ * Per-context initialization hook. Intentionally empty: PSContext needs
+ * no setup beyond its zero-initialized state (band-change resets happen
+ * lazily in decorrelation()); kept so callers have a stable entry point.
+ */
+av_cold void ff_ps_ctx_init(PSContext *ps)
+{
+}
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacps.h b/samples/rtsp_player/ffmpeg/libavcodec/aacps.h
new file mode 100755
index 0000000..5fc211a
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacps.h
@@ -0,0 +1,82 @@
+/*
+ * MPEG-4 Parametric Stereo definitions and declarations
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_PS_H
+#define AVCODEC_PS_H
+
+#include <stdint.h>
+
+#include "avcodec.h"
+#include "get_bits.h"
+
+/* Limits used to dimension the PSContext arrays below. */
+#define PS_MAX_NUM_ENV 5
+#define PS_MAX_NR_IIDICC 34
+#define PS_MAX_NR_IPDOPD 17
+#define PS_MAX_SSB 91
+#define PS_MAX_AP_BANDS 50
+#define PS_QMF_TIME_SLOTS 32
+#define PS_MAX_DELAY 14
+#define PS_AP_LINKS 3
+#define PS_MAX_AP_DELAY 5
+
+/** Parametric Stereo decoder state (bitstream parameters + filter memory). */
+typedef struct {
+ int start;
+ int enable_iid;
+ int iid_quant;
+ int nr_iid_par;
+ int nr_ipdopd_par;
+ int enable_icc;
+ int icc_mode;
+ int nr_icc_par;
+ int enable_ext;
+ int frame_class;
+ int num_env_old;
+ int num_env;
+ int enable_ipdopd;
+ int border_position[PS_MAX_NUM_ENV+1];
+ int8_t iid_par[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; ///< Inter-channel Intensity Difference Parameters
+ int8_t icc_par[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; ///< Inter-Channel Coherence Parameters
+ /* ipd/opd is iid/icc sized so that the same functions can handle both */
+ int8_t ipd_par[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; ///< Inter-channel Phase Difference Parameters
+ int8_t opd_par[PS_MAX_NUM_ENV][PS_MAX_NR_IIDICC]; ///< Overall Phase Difference Parameters
+ int is34bands;
+ int is34bands_old;
+
+ /* Filter memory carried across frames. */
+ float in_buf[5][44][2];
+ float delay[PS_MAX_SSB][PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2];
+ float ap_delay[PS_MAX_AP_BANDS][PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2];
+ float peak_decay_nrg[34];
+ float power_smooth[34];
+ float peak_decay_diff_smooth[34];
+ /* Stereo mixing coefficients: [filter-bank config][envelope][band]. */
+ float H11[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
+ float H12[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
+ float H21[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
+ float H22[2][PS_MAX_NUM_ENV+1][PS_MAX_NR_IIDICC];
+ int8_t opd_hist[PS_MAX_NR_IIDICC];
+ int8_t ipd_hist[PS_MAX_NR_IIDICC];
+} PSContext;
+
+void ff_ps_init(void);
+void ff_ps_ctx_init(PSContext *ps);
+int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb, PSContext *ps, int bits_left);
+int ff_ps_apply(AVCodecContext *avctx, PSContext *ps, float L[2][38][64], float R[2][38][64], int top);
+
+#endif /* AVCODEC_PS_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.c b/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.c
new file mode 100755
index 0000000..dc7797f
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.c
@@ -0,0 +1,93 @@
+/*
+ * Generate a header file for hardcoded Parametric Stereo tables
+ *
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#define CONFIG_HARDCODED_TABLES 0
+#include "aacps_tablegen.h"
+#include "tableprint.h"
+
+/** Print a b x c x d float array as nested C initializer lists. */
+void write_float_3d_array (const void *p, int b, int c, int d)
+{
+ int i;
+ const float *f = p;
+ for (i = 0; i < b; i++) {
+ printf("{\n");
+ write_float_2d_array(f, c, d);
+ printf("},\n");
+ f += c * d; // advance to the next c x d slice
+ }
+}
+
+/** Print an a x b x c x d float array as nested C initializer lists. */
+void write_float_4d_array (const void *p, int a, int b, int c, int d)
+{
+ int i;
+ const float *f = p;
+ for (i = 0; i < a; i++) {
+ printf("{\n");
+ write_float_3d_array(f, b, c, d);
+ printf("},\n");
+ f += b * c * d; // advance to the next b x c x d slice
+ }
+}
+
+/**
+ * Standalone table generator: computes the PS runtime tables and prints them
+ * to stdout as C initializers, for use as hardcoded tables at build time.
+ */
+int main(void)
+{
+ ps_tableinit(); // fill the static tables declared in aacps_tablegen.h
+
+ write_fileheader();
+
+ // Phase-difference smoothing tables.
+ printf("static const float pd_re_smooth[8*8*8] = {\n");
+ write_float_array(pd_re_smooth, 8*8*8);
+ printf("};\n");
+ printf("static const float pd_im_smooth[8*8*8] = {\n");
+ write_float_array(pd_im_smooth, 8*8*8);
+ printf("};\n");
+
+ // Stereo mixing matrices.
+ printf("static const float HA[46][8][4] = {\n");
+ write_float_3d_array(HA, 46, 8, 4);
+ printf("};\n");
+ printf("static const float HB[46][8][4] = {\n");
+ write_float_3d_array(HB, 46, 8, 4);
+ printf("};\n");
+
+ // Hybrid filter-bank coefficient tables.
+ printf("static const float f20_0_8[8][7][2] = {\n");
+ write_float_3d_array(f20_0_8, 8, 7, 2);
+ printf("};\n");
+ printf("static const float f34_0_12[12][7][2] = {\n");
+ write_float_3d_array(f34_0_12, 12, 7, 2);
+ printf("};\n");
+ printf("static const float f34_1_8[8][7][2] = {\n");
+ write_float_3d_array(f34_1_8, 8, 7, 2);
+ printf("};\n");
+ printf("static const float f34_2_4[4][7][2] = {\n");
+ write_float_3d_array(f34_2_4, 4, 7, 2);
+ printf("};\n");
+
+ // Decorrelator fractional-delay tables.
+ printf("static const float Q_fract_allpass[2][50][3][2] = {\n");
+ write_float_4d_array(Q_fract_allpass, 2, 50, 3, 2);
+ printf("};\n");
+ printf("static const float phi_fract[2][50][2] = {\n");
+ write_float_3d_array(phi_fract, 2, 50, 2);
+ printf("};\n");
+
+ return 0;
+}
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.h b/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.h
new file mode 100755
index 0000000..c7e062b
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacps_tablegen.h
@@ -0,0 +1,212 @@
+/*
+ * Header file for hardcoded Parametric Stereo tables
+ *
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AACPS_TABLEGEN_H
+#define AACPS_TABLEGEN_H
+
+#include <stdint.h>
+
+#if CONFIG_HARDCODED_TABLES
+#define ps_tableinit()
+#include "libavcodec/aacps_tables.h"
+#else
+#include "libavutil/common.h"
+#include "libavutil/mathematics.h"
+#define NR_ALLPASS_BANDS20 30
+#define NR_ALLPASS_BANDS34 50
+#define PS_AP_LINKS 3
+/* Tables filled in at runtime by ps_tableinit() when not hardcoded. */
+static float pd_re_smooth[8*8*8];
+static float pd_im_smooth[8*8*8];
+static float HA[46][8][4];
+static float HB[46][8][4];
+static float f20_0_8 [ 8][7][2];
+static float f34_0_12[12][7][2];
+static float f34_1_8 [ 8][7][2];
+static float f34_2_4 [ 4][7][2];
+static float Q_fract_allpass[2][50][3][2];
+static float phi_fract[2][50][2];
+
+/* 7-tap real filter prototypes used by make_filters_from_proto() to build
+ * the complex-modulated hybrid filter banks (Q8/Q12/Q4 = number of bands). */
+static const float g0_Q8[] = {
+ 0.00746082949812f, 0.02270420949825f, 0.04546865930473f, 0.07266113929591f,
+ 0.09885108575264f, 0.11793710567217f, 0.125f
+};
+
+static const float g0_Q12[] = {
+ 0.04081179924692f, 0.03812810994926f, 0.05144908135699f, 0.06399831151592f,
+ 0.07428313801106f, 0.08100347892914f, 0.08333333333333f
+};
+
+static const float g1_Q8[] = {
+ 0.01565675600122f, 0.03752716391991f, 0.05417891378782f, 0.08417044116767f,
+ 0.10307344158036f, 0.12222452249753f, 0.125f
+};
+
+static const float g2_Q4[] = {
+ -0.05908211155639f, -0.04871498374946f, 0.0f, 0.07778723915851f,
+ 0.16486303567403f, 0.23279856662996f, 0.25f
+};
+
+/**
+ * Build a complex-modulated filter bank from a 7-tap real prototype:
+ * filter[q][n] = proto[n] * exp(-j * 2*pi * (q + 0.5) * (n - 6) / bands),
+ * stored as interleaved {re, im} pairs.
+ */
+static void make_filters_from_proto(float (*filter)[7][2], const float *proto, int bands)
+{
+ int q, n;
+ for (q = 0; q < bands; q++) {
+ for (n = 0; n < 7; n++) {
+ double theta = 2 * M_PI * (q + 0.5) * (n - 6) / bands;
+ filter[q][n][0] = proto[n] * cos(theta);
+ filter[q][n][1] = proto[n] * -sin(theta);
+ }
+ }
+}
+
+/**
+ * Compute all runtime Parametric Stereo tables: phase-difference smoothing
+ * factors, IID/ICC stereo mixing matrices (HA/HB), fractional-delay phasors
+ * for the decorrelator, and the hybrid filter banks.
+ */
+static void ps_tableinit(void)
+{
+ // sin/cos of the 8 quantized IPD/OPD angles (multiples of pi/4)
+ static const float ipdopd_sin[] = { 0, M_SQRT1_2, 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2 };
+ static const float ipdopd_cos[] = { 1, M_SQRT1_2, 0, -M_SQRT1_2, -1, -M_SQRT1_2, 0, M_SQRT1_2 };
+ int pd0, pd1, pd2;
+
+ // Dequantized linear IID values: 15 "default" entries followed by
+ // 31 "fine" entries (46 total, matching HA/HB's first dimension).
+ static const float iid_par_dequant[] = {
+ //iid_par_dequant_default
+ 0.05623413251903, 0.12589254117942, 0.19952623149689, 0.31622776601684,
+ 0.44668359215096, 0.63095734448019, 0.79432823472428, 1,
+ 1.25892541179417, 1.58489319246111, 2.23872113856834, 3.16227766016838,
+ 5.01187233627272, 7.94328234724282, 17.7827941003892,
+ //iid_par_dequant_fine
+ 0.00316227766017, 0.00562341325190, 0.01, 0.01778279410039,
+ 0.03162277660168, 0.05623413251903, 0.07943282347243, 0.11220184543020,
+ 0.15848931924611, 0.22387211385683, 0.31622776601684, 0.39810717055350,
+ 0.50118723362727, 0.63095734448019, 0.79432823472428, 1,
+ 1.25892541179417, 1.58489319246111, 1.99526231496888, 2.51188643150958,
+ 3.16227766016838, 4.46683592150963, 6.30957344480193, 8.91250938133745,
+ 12.5892541179417, 17.7827941003892, 31.6227766016838, 56.2341325190349,
+ 100, 177.827941003892, 316.227766016837,
+ };
+ // Dequantized ICC values and their arccosines (precomputed).
+ static const float icc_invq[] = {
+ 1, 0.937, 0.84118, 0.60092, 0.36764, 0, -0.589, -1
+ };
+ static const float acos_icc_invq[] = {
+ 0, 0.35685527, 0.57133466, 0.92614472, 1.1943263, M_PI/2, 2.2006171, M_PI
+ };
+ int iid, icc;
+
+ int k, m;
+ // Sub-band center frequencies, in units of 1/8 (20-band) and 1/24
+ // (34-band) of a QMF band spacing.
+ static const int8_t f_center_20[] = {
+ -3, -1, 1, 3, 5, 7, 10, 14, 18, 22,
+ };
+ // NOTE(review): the ordering/values below look irregular (e.g. repeated
+ // 66, 78 and the jump to 102) — verify against the reference tables.
+ static const int8_t f_center_34[] = {
+ 2, 6, 10, 14, 18, 22, 26, 30,
+ 34,-10, -6, -2, 51, 57, 15, 21,
+ 27, 33, 39, 45, 54, 66, 78, 42,
+ 102, 66, 78, 90,102,114,126, 90,
+ };
+ static const float fractional_delay_links[] = { 0.43f, 0.75f, 0.347f };
+ const float fractional_delay_gain = 0.39f;
+
+ // Normalized weighted sum of three quantized phase angles, used to smooth
+ // IPD/OPD over time; indexed as [oldest][previous][current].
+ for (pd0 = 0; pd0 < 8; pd0++) {
+ float pd0_re = ipdopd_cos[pd0];
+ float pd0_im = ipdopd_sin[pd0];
+ for (pd1 = 0; pd1 < 8; pd1++) {
+ float pd1_re = ipdopd_cos[pd1];
+ float pd1_im = ipdopd_sin[pd1];
+ for (pd2 = 0; pd2 < 8; pd2++) {
+ float pd2_re = ipdopd_cos[pd2];
+ float pd2_im = ipdopd_sin[pd2];
+ float re_smooth = 0.25f * pd0_re + 0.5f * pd1_re + pd2_re;
+ float im_smooth = 0.25f * pd0_im + 0.5f * pd1_im + pd2_im;
+ float pd_mag = 1 / sqrt(im_smooth * im_smooth + re_smooth * re_smooth);
+ pd_re_smooth[pd0*64+pd1*8+pd2] = re_smooth * pd_mag;
+ pd_im_smooth[pd0*64+pd1*8+pd2] = im_smooth * pd_mag;
+ }
+ }
+ }
+
+ // Mixing matrices for every (IID, ICC) pair. Both the type-A (rotation)
+ // and type-B (unconstrained ICC) matrices are always precomputed; the
+ // decoder selects between HA and HB at run time.
+ for (iid = 0; iid < 46; iid++) {
+ float c = iid_par_dequant[iid]; ///< Linear Inter-channel Intensity Difference
+ float c1 = (float)M_SQRT2 / sqrtf(1.0f + c*c);
+ float c2 = c * c1;
+ for (icc = 0; icc < 8; icc++) {
+ /*if (PS_BASELINE || ps->icc_mode < 3)*/ {
+ float alpha = 0.5f * acos_icc_invq[icc];
+ float beta = alpha * (c1 - c2) * (float)M_SQRT1_2;
+ HA[iid][icc][0] = c2 * cosf(beta + alpha);
+ HA[iid][icc][1] = c1 * cosf(beta - alpha);
+ HA[iid][icc][2] = c2 * sinf(beta + alpha);
+ HA[iid][icc][3] = c1 * sinf(beta - alpha);
+ } /* else */ {
+ float alpha, gamma, mu, rho;
+ float alpha_c, alpha_s, gamma_c, gamma_s;
+ rho = FFMAX(icc_invq[icc], 0.05f); // avoid degenerate atan2/sqrt below
+ alpha = 0.5f * atan2f(2.0f * c * rho, c*c - 1.0f);
+ mu = c + 1.0f / c;
+ mu = sqrtf(1 + (4 * rho * rho - 4)/(mu * mu));
+ gamma = atanf(sqrtf((1.0f - mu)/(1.0f + mu)));
+ if (alpha < 0) alpha += M_PI/2;
+ alpha_c = cosf(alpha);
+ alpha_s = sinf(alpha);
+ gamma_c = cosf(gamma);
+ gamma_s = sinf(gamma);
+ HB[iid][icc][0] = M_SQRT2 * alpha_c * gamma_c;
+ HB[iid][icc][1] = M_SQRT2 * alpha_s * gamma_c;
+ HB[iid][icc][2] = -M_SQRT2 * alpha_s * gamma_s;
+ HB[iid][icc][3] = M_SQRT2 * alpha_c * gamma_s;
+ }
+ }
+ }
+
+ // Fractional-delay phasors per band: explicit center frequencies for the
+ // lowest bands, then a regular (k - offset) spacing for the rest.
+ for (k = 0; k < NR_ALLPASS_BANDS20; k++) {
+ double f_center, theta;
+ if (k < FF_ARRAY_ELEMS(f_center_20))
+ f_center = f_center_20[k] * 0.125;
+ else
+ f_center = k - 6.5f;
+ for (m = 0; m < PS_AP_LINKS; m++) {
+ theta = -M_PI * fractional_delay_links[m] * f_center;
+ Q_fract_allpass[0][k][m][0] = cos(theta);
+ Q_fract_allpass[0][k][m][1] = sin(theta);
+ }
+ theta = -M_PI*fractional_delay_gain*f_center;
+ phi_fract[0][k][0] = cos(theta);
+ phi_fract[0][k][1] = sin(theta);
+ }
+ for (k = 0; k < NR_ALLPASS_BANDS34; k++) {
+ double f_center, theta;
+ if (k < FF_ARRAY_ELEMS(f_center_34))
+ f_center = f_center_34[k] / 24.;
+ else
+ f_center = k - 26.5f;
+ for (m = 0; m < PS_AP_LINKS; m++) {
+ theta = -M_PI * fractional_delay_links[m] * f_center;
+ Q_fract_allpass[1][k][m][0] = cos(theta);
+ Q_fract_allpass[1][k][m][1] = sin(theta);
+ }
+ theta = -M_PI*fractional_delay_gain*f_center;
+ phi_fract[1][k][0] = cos(theta);
+ phi_fract[1][k][1] = sin(theta);
+ }
+
+ // Hybrid filter banks for the 20- and 34-band configurations.
+ make_filters_from_proto(f20_0_8, g0_Q8, 8);
+ make_filters_from_proto(f34_0_12, g0_Q12, 12);
+ make_filters_from_proto(f34_1_8, g1_Q8, 8);
+ make_filters_from_proto(f34_2_4, g2_Q4, 4);
+}
+#endif /* CONFIG_HARDCODED_TABLES */
+
+#endif /* AACPS_TABLEGEN_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacpsdata.c b/samples/rtsp_player/ffmpeg/libavcodec/aacpsdata.c
new file mode 100755
index 0000000..7431cae
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacpsdata.c
@@ -0,0 +1,163 @@
+/*
+ * MPEG-4 Parametric Stereo data tables
+ * Copyright (c) 2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* IID Huffman codebooks, fine quantization (61 symbols, centre index 30 —
+ * see huff_offset): frequency-differential (df1) and time-differential
+ * (dt1) code lengths and codes. */
+static const uint8_t huff_iid_df1_bits[] = {
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 17, 18, 17, 17, 16, 16, 15, 14, 14,
+ 13, 12, 12, 11, 10, 10, 8, 7, 6, 5, 4, 3, 1, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 11, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 17, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18,
+};
+
+static const uint32_t huff_iid_df1_codes[] = {
+ 0x01FEB4, 0x01FEB5, 0x01FD76, 0x01FD77, 0x01FD74, 0x01FD75, 0x01FE8A,
+ 0x01FE8B, 0x01FE88, 0x00FE80, 0x01FEB6, 0x00FE82, 0x00FEB8, 0x007F42,
+ 0x007FAE, 0x003FAF, 0x001FD1, 0x001FE9, 0x000FE9, 0x0007EA, 0x0007FB,
+ 0x0003FB, 0x0001FB, 0x0001FF, 0x00007C, 0x00003C, 0x00001C, 0x00000C,
+ 0x000000, 0x000001, 0x000001, 0x000002, 0x000001, 0x00000D, 0x00001D,
+ 0x00003D, 0x00007D, 0x0000FC, 0x0001FC, 0x0003FC, 0x0003F4, 0x0007EB,
+ 0x000FEA, 0x001FEA, 0x001FD6, 0x003FD0, 0x007FAF, 0x007F43, 0x00FEB9,
+ 0x00FE83, 0x01FEB7, 0x00FE81, 0x01FE89, 0x01FE8E, 0x01FE8F, 0x01FE8C,
+ 0x01FE8D, 0x01FEB2, 0x01FEB3, 0x01FEB0, 0x01FEB1,
+};
+
+static const uint8_t huff_iid_dt1_bits[] = {
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 15, 15, 15, 15, 15, 15, 14, 14, 13,
+ 13, 13, 12, 12, 11, 10, 9, 9, 7, 6, 5, 3, 1, 2, 5, 6, 7, 8,
+ 9, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16,
+};
+
+static const uint16_t huff_iid_dt1_codes[] = {
+ 0x004ED4, 0x004ED5, 0x004ECE, 0x004ECF, 0x004ECC, 0x004ED6, 0x004ED8,
+ 0x004F46, 0x004F60, 0x002718, 0x002719, 0x002764, 0x002765, 0x00276D,
+ 0x0027B1, 0x0013B7, 0x0013D6, 0x0009C7, 0x0009E9, 0x0009ED, 0x0004EE,
+ 0x0004F7, 0x000278, 0x000139, 0x00009A, 0x00009F, 0x000020, 0x000011,
+ 0x00000A, 0x000003, 0x000001, 0x000000, 0x00000B, 0x000012, 0x000021,
+ 0x00004C, 0x00009B, 0x00013A, 0x000279, 0x000270, 0x0004EF, 0x0004E2,
+ 0x0009EA, 0x0009D8, 0x0013D7, 0x0013D0, 0x0027B2, 0x0027A2, 0x00271A,
+ 0x00271B, 0x004F66, 0x004F67, 0x004F61, 0x004F47, 0x004ED9, 0x004ED7,
+ 0x004ECD, 0x004ED2, 0x004ED3, 0x004ED0, 0x004ED1,
+};
+
+/* IID Huffman codebooks, default (coarse) quantization (29 symbols, centre
+ * index 14): frequency-differential (df0) and time-differential (dt0). */
+static const uint8_t huff_iid_df0_bits[] = {
+ 17, 17, 17, 17, 16, 15, 13, 10, 9, 7, 6, 5, 4, 3, 1, 3, 4, 5,
+ 6, 6, 8, 11, 13, 14, 14, 15, 17, 18, 18,
+};
+
+static const uint32_t huff_iid_df0_codes[] = {
+ 0x01FFFB, 0x01FFFC, 0x01FFFD, 0x01FFFA, 0x00FFFC, 0x007FFC, 0x001FFD,
+ 0x0003FE, 0x0001FE, 0x00007E, 0x00003C, 0x00001D, 0x00000D, 0x000005,
+ 0x000000, 0x000004, 0x00000C, 0x00001C, 0x00003D, 0x00003E, 0x0000FE,
+ 0x0007FE, 0x001FFC, 0x003FFC, 0x003FFD, 0x007FFD, 0x01FFFE, 0x03FFFE,
+ 0x03FFFF,
+};
+
+static const uint8_t huff_iid_dt0_bits[] = {
+ 19, 19, 19, 20, 20, 20, 17, 15, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7,
+ 9, 11, 13, 14, 17, 19, 20, 20, 20, 20, 20,
+};
+
+static const uint32_t huff_iid_dt0_codes[] = {
+ 0x07FFF9, 0x07FFFA, 0x07FFFB, 0x0FFFF8, 0x0FFFF9, 0x0FFFFA, 0x01FFFD,
+ 0x007FFE, 0x000FFE, 0x0003FE, 0x0000FE, 0x00003E, 0x00000E, 0x000002,
+ 0x000000, 0x000006, 0x00001E, 0x00007E, 0x0001FE, 0x0007FE, 0x001FFE,
+ 0x003FFE, 0x01FFFC, 0x07FFF8, 0x0FFFFB, 0x0FFFFC, 0x0FFFFD, 0x0FFFFE,
+ 0x0FFFFF,
+};
+
+/* ICC Huffman codebooks (15 symbols, centre index 7): frequency- and
+ * time-differential variants. */
+static const uint8_t huff_icc_df_bits[] = {
+ 14, 14, 12, 10, 7, 5, 3, 1, 2, 4, 6, 8, 9, 11, 13,
+};
+
+static const uint16_t huff_icc_df_codes[] = {
+ 0x3FFF, 0x3FFE, 0x0FFE, 0x03FE, 0x007E, 0x001E, 0x0006, 0x0000,
+ 0x0002, 0x000E, 0x003E, 0x00FE, 0x01FE, 0x07FE, 0x1FFE,
+};
+
+static const uint8_t huff_icc_dt_bits[] = {
+ 14, 13, 11, 9, 7, 5, 3, 1, 2, 4, 6, 8, 10, 12, 14,
+};
+
+static const uint16_t huff_icc_dt_codes[] = {
+ 0x3FFE, 0x1FFE, 0x07FE, 0x01FE, 0x007E, 0x001E, 0x0006, 0x0000,
+ 0x0002, 0x000E, 0x003E, 0x00FE, 0x03FE, 0x0FFE, 0x3FFF,
+};
+
+/* IPD/OPD Huffman codebooks (8 symbols each, no offset): frequency- and
+ * time-differential variants. */
+static const uint8_t huff_ipd_df_bits[] = {
+ 1, 3, 4, 4, 4, 4, 4, 4,
+};
+
+static const uint8_t huff_ipd_df_codes[] = {
+ 0x01, 0x00, 0x06, 0x04, 0x02, 0x03, 0x05, 0x07,
+};
+
+static const uint8_t huff_ipd_dt_bits[] = {
+ 1, 3, 4, 5, 5, 4, 4, 3,
+};
+
+static const uint8_t huff_ipd_dt_codes[] = {
+ 0x01, 0x02, 0x02, 0x03, 0x02, 0x00, 0x03, 0x03,
+};
+
+static const uint8_t huff_opd_df_bits[] = {
+ 1, 3, 4, 4, 5, 5, 4, 3,
+};
+
+static const uint8_t huff_opd_df_codes[] = {
+ 0x01, 0x01, 0x06, 0x04, 0x0F, 0x0E, 0x05, 0x00,
+};
+
+static const uint8_t huff_opd_dt_bits[] = {
+ 1, 3, 4, 5, 5, 4, 4, 3,
+};
+
+static const uint8_t huff_opd_dt_codes[] = {
+ 0x01, 0x02, 0x01, 0x07, 0x06, 0x00, 0x02, 0x03,
+};
+
+/* Symbol offset for each codebook pair, in the same order the VLCs are
+ * built in ff_ps_init(): iid fine (df1/dt1), iid coarse (df0/dt0), icc,
+ * ipd, opd. Subtracted from the decoded index to recenter the value. */
+static const int8_t huff_offset[] = {
+ 30, 30,
+ 14, 14,
+ 7, 7,
+ 0, 0,
+ 0, 0,
+};
+
+/// Table 8.48 — apparently maps hybrid subband k to parameter band i for
+/// the 20-band configuration (71 entries) — TODO(review): confirm mapping.
+static const int8_t k_to_i_20[] = {
+ 1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 14, 15,
+ 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19
+};
+/// Table 8.49 — same mapping for the 34-band configuration (91 entries,
+/// matching PS_MAX_SSB).
+static const int8_t k_to_i_34[] = {
+ 0, 1, 2, 3, 4, 5, 6, 6, 7, 2, 1, 0, 10, 10, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 9, 14, 11, 12, 13, 14, 15, 16, 13, 16, 17, 18, 19, 20, 21,
+ 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 27, 28, 28, 28, 29, 29, 29,
+ 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33, 33,
+ 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33
+};
+
+/* 7-tap filter prototype (4-band, Q2) — companion to the g*_Q* prototypes
+ * in aacps_tablegen.h. */
+static const float g1_Q2[] = {
+ 0.0f, 0.01899487526049f, 0.0f, -0.07293139167538f,
+ 0.0f, 0.30596630545168f, 0.5f
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.c b/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.c
new file mode 100755
index 0000000..baf9388
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.c
@@ -0,0 +1,924 @@
+/*
+ * AAC encoder psychoacoustic model
+ * Copyright (C) 2008 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC encoder psychoacoustic model
+ */
+
+#include "avcodec.h"
+#include "aactab.h"
+#include "psymodel.h"
+
+/***********************************
+ * TODOs:
+ * try other bitrate controlling mechanism (maybe use ratecontrol.c?)
+ * control quality for quality-based output
+ **********************************/
+
+/**
+ * constants for 3GPP AAC psychoacoustic model
+ * @{
+ */
+#define PSY_3GPP_THR_SPREAD_HI 1.5f // spreading factor for low-to-hi threshold spreading (15 dB/Bark)
+#define PSY_3GPP_THR_SPREAD_LOW 3.0f // spreading factor for hi-to-low threshold spreading (30 dB/Bark)
+/* spreading factor for low-to-hi energy spreading, long block, > 22kbps/channel (20dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_L1 2.0f
+/* spreading factor for low-to-hi energy spreading, long block, <= 22kbps/channel (15dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_L2 1.5f
+/* spreading factor for low-to-hi energy spreading, short block (15 dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_HI_S 1.5f
+/* spreading factor for hi-to-low energy spreading, long block (30dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_LOW_L 3.0f
+/* spreading factor for hi-to-low energy spreading, short block (20dB/Bark) */
+#define PSY_3GPP_EN_SPREAD_LOW_S 2.0f
+
+#define PSY_3GPP_RPEMIN 0.01f
+#define PSY_3GPP_RPELEV 2.0f
+
+#define PSY_3GPP_C1 3.0f /* log2(8) */
+#define PSY_3GPP_C2 1.3219281f /* log2(2.5) */
+#define PSY_3GPP_C3 0.55935729f /* 1 - C2 / C1 */
+
+/* SNR bounds used to clip per-band minimal SNR values. */
+#define PSY_SNR_1DB 7.9432821e-1f /* -1dB */
+#define PSY_SNR_25DB 3.1622776e-3f /* -25dB */
+
+/* Bit-reservoir save/spend slopes and clip limits (long/short blocks). */
+#define PSY_3GPP_SAVE_SLOPE_L -0.46666667f
+#define PSY_3GPP_SAVE_SLOPE_S -0.36363637f
+#define PSY_3GPP_SAVE_ADD_L -0.84285712f
+#define PSY_3GPP_SAVE_ADD_S -0.75f
+#define PSY_3GPP_SPEND_SLOPE_L 0.66666669f
+#define PSY_3GPP_SPEND_SLOPE_S 0.81818181f
+#define PSY_3GPP_SPEND_ADD_L -0.35f
+#define PSY_3GPP_SPEND_ADD_S -0.26111111f
+#define PSY_3GPP_CLIP_LO_L 0.2f
+#define PSY_3GPP_CLIP_LO_S 0.2f
+#define PSY_3GPP_CLIP_HI_L 0.95f
+#define PSY_3GPP_CLIP_HI_S 0.75f
+
+#define PSY_3GPP_AH_THR_LONG 0.5f
+#define PSY_3GPP_AH_THR_SHORT 0.63f
+
+/* States of the hole-avoidance decision (see AacPsyBand.avoid_holes). */
+enum {
+ PSY_3GPP_AH_NONE,
+ PSY_3GPP_AH_INACTIVE,
+ PSY_3GPP_AH_ACTIVE
+};
+
+/* Rough conversion from available bits to perceptual entropy. */
+#define PSY_3GPP_BITS_TO_PE(bits) ((bits) * 1.18f)
+
+/* LAME psy model constants */
+#define PSY_LAME_FIR_LEN 21 ///< LAME psy model FIR order
+#define AAC_BLOCK_SIZE_LONG 1024 ///< long block size
+#define AAC_BLOCK_SIZE_SHORT 128 ///< short block size
+#define AAC_NUM_BLOCKS_SHORT 8 ///< number of blocks in a short sequence
+#define PSY_LAME_NUM_SUBBLOCKS 3 ///< Number of sub-blocks in each short block
+
+/**
+ * @}
+ */
+
+/**
+ * information for single band used by 3GPP TS26.403-inspired psychoacoustic model
+ */
+typedef struct AacPsyBand{
+ float energy; ///< band energy
+ float thr; ///< energy threshold
+ float thr_quiet; ///< threshold in quiet
+ float nz_lines; ///< number of non-zero spectral lines
+ float active_lines; ///< number of active spectral lines
+ float pe; ///< perceptual entropy
+ float pe_const; ///< constant part of the PE calculation
+ float norm_fac; ///< normalization factor for linearization
+ int avoid_holes; ///< hole avoidance flag (PSY_3GPP_AH_*)
+}AacPsyBand;
+
+/**
+ * single/pair channel context for psychoacoustic model
+ */
+typedef struct AacPsyChannel{
+ AacPsyBand band[128]; ///< bands information
+ AacPsyBand prev_band[128]; ///< bands information from the previous frame
+
+ float win_energy; ///< sliding average of channel energy
+ float iir_state[2]; ///< hi-pass IIR filter state
+ uint8_t next_grouping; ///< stored grouping scheme for the next frame (in case of 8 short window sequence)
+ enum WindowSequence next_window_seq; ///< window sequence to be used in the next frame
+ /* LAME psy model specific members */
+ float attack_threshold; ///< attack threshold for this channel
+ float prev_energy_subshort[AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS];
+ int prev_attack; ///< attack value for the last short block in the previous sequence
+}AacPsyChannel;
+
+/**
+ * psychoacoustic model frame type-dependent coefficients
+ */
+typedef struct AacPsyCoeffs{
+ float ath; ///< absolute threshold of hearing per bands
+ float barks; ///< Bark value for each spectral band in long frame
+ float spread_low[2]; ///< spreading factor for low-to-high threshold spreading in long frame
+ float spread_hi [2]; ///< spreading factor for high-to-low threshold spreading in long frame
+ float min_snr; ///< minimal SNR
+}AacPsyCoeffs;
+
+/**
+ * 3GPP TS26.403-inspired psychoacoustic model specific data
+ */
+typedef struct AacPsyContext{
+ int chan_bitrate; ///< bitrate per channel
+ int frame_bits; ///< average bits per frame
+ int fill_level; ///< bit reservoir fill level
+ struct {
+ float min; ///< minimum allowed PE for bit factor calculation
+ float max; ///< maximum allowed PE for bit factor calculation
+ float previous; ///< allowed PE of the previous frame
+ float correction; ///< PE correction factor
+ } pe;
+ AacPsyCoeffs psy_coef[2][64]; ///< [0] = long windows, [1] = short windows
+ AacPsyChannel *ch; ///< per-channel state, allocated in psy_3gpp_init()
+}AacPsyContext;
+
+/**
+ * LAME psy model preset struct
+ */
+typedef struct {
+ int quality; ///< Quality to map the rest of the vaules to.
+ /* This is overloaded to be both kbps per channel in ABR mode, and
+ * requested quality in constant quality mode.
+ */
+ float st_lrm; ///< short threshold for L, R, and M channels
+} PsyLamePreset;
+
+/**
+ * LAME psy model preset table for ABR
+ */
+static const PsyLamePreset psy_abr_map[] = {
+/* TODO: Tuning. These were taken from LAME. */
+/* kbps/ch st_lrm */
+ { 8, 6.60},
+ { 16, 6.60},
+ { 24, 6.60},
+ { 32, 6.60},
+ { 40, 6.60},
+ { 48, 6.60},
+ { 56, 6.60},
+ { 64, 6.40},
+ { 80, 6.00},
+ { 96, 5.60},
+ {112, 5.20},
+ {128, 5.20},
+ {160, 5.20}
+};
+
+/**
+* LAME psy model preset table for constant quality
+* (indexed directly by quality value 0..10 in lame_window_init()).
+*/
+static const PsyLamePreset psy_vbr_map[] = {
+/* vbr_q st_lrm */
+ { 0, 4.20},
+ { 1, 4.20},
+ { 2, 4.20},
+ { 3, 4.20},
+ { 4, 4.20},
+ { 5, 4.20},
+ { 6, 4.20},
+ { 7, 4.20},
+ { 8, 4.20},
+ { 9, 4.20},
+ {10, 4.20}
+};
+
+/**
+ * LAME psy model FIR coefficient table
+ * (each tap pre-doubled; presumably half of a symmetric FIR — see
+ * PSY_LAME_FIR_LEN = 21 — TODO(review): confirm how the consumer expands it)
+ */
+static const float psy_fir_coeffs[] = {
+ -8.65163e-18 * 2, -0.00851586 * 2, -6.74764e-18 * 2, 0.0209036 * 2,
+ -3.36639e-17 * 2, -0.0438162 * 2, -1.54175e-17 * 2, 0.0931738 * 2,
+ -5.52212e-17 * 2, -0.313819 * 2
+};
+
+/**
+ * Map a per-channel ABR bitrate to an attack threshold by snapping to the
+ * nearest row of psy_abr_map.
+ * @param bitrate bitrate per channel in kbps
+ * @return st_lrm threshold of the closest preset
+ */
+static float lame_calc_attack_threshold(int bitrate)
+{
+ /* Assume max bitrate to start with */
+ int lower_range = 12, upper_range = 12;
+ int lower_range_kbps = psy_abr_map[12].quality;
+ int upper_range_kbps = psy_abr_map[12].quality;
+ int i;
+
+ /* Determine which bitrates the value specified falls between.
+ * If the loop ends without breaking our above assumption of 320kbps was correct.
+ * (320 kbps total = 160 kbps/ch, the last row of psy_abr_map.)
+ */
+ for (i = 1; i < 13; i++) {
+ if (FFMAX(bitrate, psy_abr_map[i].quality) != bitrate) { // first row above `bitrate`
+ upper_range = i;
+ upper_range_kbps = psy_abr_map[i ].quality;
+ lower_range = i - 1;
+ lower_range_kbps = psy_abr_map[i - 1].quality;
+ break; /* Upper range found */
+ }
+ }
+
+ /* Determine which range the value specified is closer to */
+ if ((upper_range_kbps - bitrate) > (bitrate - lower_range_kbps))
+ return psy_abr_map[lower_range].st_lrm;
+ return psy_abr_map[upper_range].st_lrm;
+}
+
+/**
+ * LAME psy model specific initialization: picks each channel's attack
+ * threshold from the VBR preset table (quality mode) or by interpolating the
+ * ABR table (bitrate mode), and seeds the sub-short energy history.
+ */
+static void lame_window_init(AacPsyContext *ctx, AVCodecContext *avctx) {
+ int i, j;
+
+ for (i = 0; i < avctx->channels; i++) {
+ AacPsyChannel *pch = &ctx->ch[i];
+
+ if (avctx->flags & CODEC_FLAG_QSCALE)
+ // NOTE(review): global_quality / FF_QP2LAMBDA indexes psy_vbr_map
+ // (11 entries) without a bounds check — verify callers clamp it.
+ pch->attack_threshold = psy_vbr_map[avctx->global_quality / FF_QP2LAMBDA].st_lrm;
+ else
+ pch->attack_threshold = lame_calc_attack_threshold(avctx->bit_rate / avctx->channels / 1000);
+
+ for (j = 0; j < AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS; j++)
+ pch->prev_energy_subshort[j] = 10.0f;
+ }
+}
+
+/**
+ * Calculate Bark value for given line.
+ * @param f frequency in Hz
+ * @return critical-band rate (Bark) of f
+ */
+static av_cold float calc_bark(float f)
+{
+ return 13.3f * atanf(0.00076f * f) + 3.5f * atanf((f / 7500.0f) * (f / 7500.0f));
+}
+
+#define ATH_ADD 4 // extra weighting of the high-frequency quartic term
+/**
+ * Calculate ATH value for given frequency.
+ * Borrowed from Lame.
+ * @param f frequency in Hz
+ * @param add weight of the f^4 high-frequency term
+ * @return absolute threshold of hearing, in dB (approximation)
+ */
+static av_cold float ath(float f, float add)
+{
+ f /= 1000.0f; // formula below expects kHz
+ return 3.64 * pow(f, -0.8)
+ - 6.8 * exp(-0.6 * (f - 3.4) * (f - 3.4))
+ + 6.0 * exp(-0.15 * (f - 8.7) * (f - 8.7))
+ + (0.6 + 0.04 * add) * 0.001 * f * f * f * f;
+}
+
+/**
+ * Initialize the 3GPP psychoacoustic model: allocate per-model and
+ * per-channel state, size the bit reservoir, and precompute per-band Bark
+ * values, spreading factors, minimal SNRs and ATH values for both the
+ * long-window (j == 0) and short-window (j == 1) configurations.
+ * Always returns 0.
+ */
+static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
+ AacPsyContext *pctx;
+ float bark;
+ int i, j, g, start;
+ float prev, minscale, minath, minsnr, pe_min;
+ const int chan_bitrate = ctx->avctx->bit_rate / ctx->avctx->channels;
+ const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : ctx->avctx->sample_rate / 2;
+ const float num_bark = calc_bark((float)bandwidth);
+
+ // NOTE(review): av_mallocz results (here and pctx->ch below) are not
+ // checked; an allocation failure crashes instead of returning an error.
+ ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
+ pctx = (AacPsyContext*) ctx->model_priv_data;
+
+ pctx->chan_bitrate = chan_bitrate;
+ pctx->frame_bits = chan_bitrate * AAC_BLOCK_SIZE_LONG / ctx->avctx->sample_rate;
+ pctx->pe.min = 8.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
+ pctx->pe.max = 12.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
+ ctx->bitres.size = 6144 - pctx->frame_bits; // 6144 = max bits per AAC channel
+ ctx->bitres.size -= ctx->bitres.size % 8;
+ pctx->fill_level = ctx->bitres.size;
+ minath = ath(3410, ATH_ADD); // ATH minimum, used as reference level
+ for (j = 0; j < 2; j++) {
+ AacPsyCoeffs *coeffs = pctx->psy_coef[j];
+ const uint8_t *band_sizes = ctx->bands[j];
+ float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f);
+ float avg_chan_bits = chan_bitrate / ctx->avctx->sample_rate * (j ? 128.0f : 1024.0f);
+ /* reference encoder uses 2.4% here instead of 60% like the spec says */
+ float bark_pe = 0.024f * PSY_3GPP_BITS_TO_PE(avg_chan_bits) / num_bark;
+ float en_spread_low = j ? PSY_3GPP_EN_SPREAD_LOW_S : PSY_3GPP_EN_SPREAD_LOW_L;
+ /* High energy spreading for long blocks <= 22kbps/channel and short blocks are the same. */
+ // NOTE(review): chan_bitrate is in bit/s, so "<= 22.0f" is effectively
+ // never true — presumably 22000 was intended; verify against upstream.
+ float en_spread_hi = (j || (chan_bitrate <= 22.0f)) ? PSY_3GPP_EN_SPREAD_HI_S : PSY_3GPP_EN_SPREAD_HI_L1;
+
+ // Bark value at the center of each scalefactor band.
+ i = 0;
+ prev = 0.0;
+ for (g = 0; g < ctx->num_bands[j]; g++) {
+ i += band_sizes[g];
+ bark = calc_bark((i-1) * line_to_frequency);
+ coeffs[g].barks = (bark + prev) / 2.0;
+ prev = bark;
+ }
+ // Spreading factors and minimal SNR per band.
+ for (g = 0; g < ctx->num_bands[j] - 1; g++) {
+ AacPsyCoeffs *coeff = &coeffs[g];
+ // NOTE(review): coeffs->barks is coeffs[0].barks, so the width is
+ // measured from band 0 — presumably coeffs[g].barks was intended.
+ float bark_width = coeffs[g+1].barks - coeffs->barks;
+ coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW);
+ coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI);
+ coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
+ coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
+ pe_min = bark_pe * bark_width;
+ minsnr = pow(2.0f, pe_min / band_sizes[g]) - 1.5f;
+ coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
+ }
+ // Minimum ATH across each band's spectral lines, relative to minath.
+ start = 0;
+ for (g = 0; g < ctx->num_bands[j]; g++) {
+ minscale = ath(start * line_to_frequency, ATH_ADD);
+ for (i = 1; i < band_sizes[g]; i++)
+ minscale = FFMIN(minscale, ath((start + i) * line_to_frequency, ATH_ADD));
+ coeffs[g].ath = minscale - minath;
+ start += band_sizes[g];
+ }
+ }
+
+ pctx->ch = av_mallocz(sizeof(AacPsyChannel) * ctx->avctx->channels);
+
+ lame_window_init(pctx, ctx->avctx);
+
+ return 0;
+}
+
+/**
+ * IIR filter used in block switching decision
+ *
+ * First-order filter: the (in - state[0]) difference term suppresses DC /
+ * low frequencies, emphasizing transients for the attack detector.
+ *
+ * @param in    new input sample
+ * @param state filter memory: state[0] = previous input, state[1] = previous output
+ * @return filtered sample
+ */
+static float iir_filter(int in, float state[2])
+{
+    float ret;
+
+    ret = 0.7548f * (in - state[0]) + 0.5095f * state[1];
+    state[0] = in;
+    state[1] = ret;
+    return ret;
+}
+
+/**
+ * window grouping information stored as bits (0 - new group, 1 - group continues)
+ *
+ * Indexed by the position of the detected attack (0 when none); the chosen
+ * bitmask is expanded into per-group window counts when building the
+ * FFPsyWindowInfo for an EIGHT_SHORT_SEQUENCE frame.
+ */
+static const uint8_t window_grouping[9] = {
+    0xB6, 0x6C, 0xD8, 0xB2, 0x66, 0xC6, 0x96, 0x36, 0x36
+};
+
+/**
+ * Tell encoder which window types to use.
+ * @see 3GPP TS26.403 5.4.1 "Blockswitching"
+ *
+ * @param ctx       psychoacoustic model context
+ * @param audio     current frame samples (unused here)
+ * @param la        lookahead samples, or NULL when no lookahead is available
+ * @param channel   channel index
+ * @param prev_type window sequence used for the previous frame
+ * @return filled window info (window types, shape and short-window grouping)
+ */
+static FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
+                                       const int16_t *audio, const int16_t *la,
+                                       int channel, int prev_type)
+{
+    int i, j;
+    int br = ctx->avctx->bit_rate / ctx->avctx->channels;
+    /* lower bitrates use a higher attack threshold (fewer short windows) */
+    int attack_ratio = br <= 16000 ? 18 : 10;
+    AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
+    AacPsyChannel *pch = &pctx->ch[channel];
+    uint8_t grouping = 0;
+    int next_type = pch->next_window_seq;
+    FFPsyWindowInfo wi;
+
+    memset(&wi, 0, sizeof(wi));
+    if (la) {
+        /* Lookahead available: detect attacks to decide between long/short windows. */
+        float s[8], v;
+        int switch_to_eight = 0;
+        float sum = 0.0, sum2 = 0.0;
+        int attack_n = 0;
+        int stay_short = 0;
+        /* NOTE(review): 'sum' is never reset per sub-block, so s[i] holds the
+         * cumulative filtered energy up to sub-block i rather than the
+         * per-sub-block energy — confirm this is intentional. */
+        for (i = 0; i < 8; i++) {
+            for (j = 0; j < 128; j++) {
+                v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state);
+                sum += v*v;
+            }
+            s[i] = sum;
+            sum2 += sum;
+        }
+        /* an attack is a sub-block exceeding the smoothed frame energy by attack_ratio */
+        for (i = 0; i < 8; i++) {
+            if (s[i] > pch->win_energy * attack_ratio) {
+                attack_n = i + 1;
+                switch_to_eight = 1;
+                break;
+            }
+        }
+        /* exponential smoothing of the frame energy (7/8 old, 1/8 new) */
+        pch->win_energy = pch->win_energy*7/8 + sum2/64;
+
+        wi.window_type[1] = prev_type;
+        /* Window sequence state machine: long<->short transitions must pass
+         * through LONG_START/LONG_STOP sequences, so the switch may only take
+         * effect one frame later (via pch->next_window_seq). */
+        switch (prev_type) {
+        case ONLY_LONG_SEQUENCE:
+            wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE;
+            next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : ONLY_LONG_SEQUENCE;
+            break;
+        case LONG_START_SEQUENCE:
+            wi.window_type[0] = EIGHT_SHORT_SEQUENCE;
+            grouping = pch->next_grouping;
+            next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE;
+            break;
+        case LONG_STOP_SEQUENCE:
+            wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE;
+            next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : ONLY_LONG_SEQUENCE;
+            break;
+        case EIGHT_SHORT_SEQUENCE:
+            stay_short = next_type == EIGHT_SHORT_SEQUENCE || switch_to_eight;
+            wi.window_type[0] = stay_short ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE;
+            grouping = next_type == EIGHT_SHORT_SEQUENCE ? pch->next_grouping : 0;
+            next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE;
+            break;
+        }
+
+        pch->next_grouping = window_grouping[attack_n];
+        pch->next_window_seq = next_type;
+    } else {
+        /* no lookahead: keep the previous window sequence unchanged */
+        for (i = 0; i < 3; i++)
+            wi.window_type[i] = prev_type;
+        grouping = (prev_type == EIGHT_SHORT_SEQUENCE) ? window_grouping[0] : 0;
+    }
+
+    wi.window_shape = 1;
+    if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) {
+        wi.num_windows = 1;
+        wi.grouping[0] = 1;
+    } else {
+        /* expand the grouping bitmask into per-group window counts:
+         * a 0 bit starts a new group, a 1 bit extends the current one */
+        int lastgrp = 0;
+        wi.num_windows = 8;
+        for (i = 0; i < 8; i++) {
+            if (!((grouping >> i) & 1))
+                lastgrp = i;
+            wi.grouping[lastgrp]++;
+        }
+    }
+
+    return wi;
+}
+
+/* 5.6.1.2 "Calculation of Bit Demand" */
+/**
+ * Compute the bit budget for the current frame from the perceptual entropy
+ * and the bit reservoir state.  Updates ctx->fill_level and widens the
+ * tracked pe.min/pe.max range.
+ *
+ * @param ctx          model context (fill_level and pe range are updated)
+ * @param pe           perceptual entropy of the current frame
+ * @param bits         bits consumed by the previous frame
+ * @param size         bit reservoir size
+ * @param short_window nonzero for 8-short-window frames (different tuning constants)
+ * @return number of bits the encoder should spend on this frame
+ */
+static int calc_bit_demand(AacPsyContext *ctx, float pe, int bits, int size,
+                           int short_window)
+{
+    const float bitsave_slope = short_window ? PSY_3GPP_SAVE_SLOPE_S : PSY_3GPP_SAVE_SLOPE_L;
+    const float bitsave_add = short_window ? PSY_3GPP_SAVE_ADD_S : PSY_3GPP_SAVE_ADD_L;
+    const float bitspend_slope = short_window ? PSY_3GPP_SPEND_SLOPE_S : PSY_3GPP_SPEND_SLOPE_L;
+    const float bitspend_add = short_window ? PSY_3GPP_SPEND_ADD_S : PSY_3GPP_SPEND_ADD_L;
+    const float clip_low = short_window ? PSY_3GPP_CLIP_LO_S : PSY_3GPP_CLIP_LO_L;
+    const float clip_high = short_window ? PSY_3GPP_CLIP_HI_S : PSY_3GPP_CLIP_HI_L;
+    float clipped_pe, bit_save, bit_spend, bit_factor, fill_level;
+
+    /* reservoir bookkeeping: frame_bits flow in, previous frame's bits flow out */
+    ctx->fill_level += ctx->frame_bits - bits;
+    ctx->fill_level = av_clip(ctx->fill_level, 0, size);
+    fill_level = av_clipf((float)ctx->fill_level / size, clip_low, clip_high);
+    clipped_pe = av_clipf(pe, ctx->pe.min, ctx->pe.max);
+    bit_save = (fill_level + bitsave_add) * bitsave_slope;
+    assert(bit_save <= 0.3f && bit_save >= -0.05000001f);
+    bit_spend = (fill_level + bitspend_add) * bitspend_slope;
+    assert(bit_spend <= 0.5f && bit_spend >= -0.1f);
+    /* The bit factor graph in the spec is obviously incorrect.
+     * bit_spend + ((bit_spend - bit_spend))...
+     * The reference encoder subtracts everything from 1, but also seems incorrect.
+     * 1 - bit_save + ((bit_spend + bit_save))...
+     * Hopefully below is correct.
+     */
+    bit_factor = 1.0f - bit_save + ((bit_spend - bit_save) / (ctx->pe.max - ctx->pe.min)) * (clipped_pe - ctx->pe.min);
+    /* NOTE: The reference encoder attempts to center pe max/min around the current pe. */
+    ctx->pe.max = FFMAX(pe, ctx->pe.max);
+    ctx->pe.min = FFMIN(pe, ctx->pe.min);
+
+    /* never demand more than the reservoir can supply */
+    return FFMIN(ctx->frame_bits * bit_factor, ctx->frame_bits + size - bits);
+}
+
+/**
+ * Calculate the perceptual entropy (PE) contribution of one band and cache
+ * the values needed by the later threshold-reduction steps.
+ *
+ * @param band band to update (pe, pe_const and active_lines are rewritten)
+ * @return the band's PE (0 if the band's energy is below its threshold)
+ */
+static float calc_pe_3gpp(AacPsyBand *band)
+{
+    float pe, a;
+
+    band->pe = 0.0f;
+    band->pe_const = 0.0f;
+    band->active_lines = 0.0f;
+    if (band->energy > band->thr) {
+        a = log2f(band->energy);
+        pe = a - log2f(band->thr);
+        band->active_lines = band->nz_lines;
+        /* low-PE bands use the linearized approximation (3GPP TS26.403 5.6.1.3.1) */
+        if (pe < PSY_3GPP_C1) {
+            pe = pe * PSY_3GPP_C3 + PSY_3GPP_C2;
+            a = a * PSY_3GPP_C3 + PSY_3GPP_C2;
+            band->active_lines *= PSY_3GPP_C3;
+        }
+        band->pe = pe * band->nz_lines;
+        band->pe_const = a * band->nz_lines;
+    }
+
+    return band->pe;
+}
+
+/**
+ * Estimate the threshold reduction value needed to bring the current PE down
+ * to the desired PE (3GPP TS26.403 5.6.1.3.4).
+ *
+ * @param a            constant part of the PE summed over the relevant bands
+ * @param desired_pe   target perceptual entropy
+ * @param pe           current perceptual entropy
+ * @param active_lines number of active spectral lines; must be non-zero
+ * @return non-negative reduction value
+ */
+static float calc_reduction_3gpp(float a, float desired_pe, float pe,
+                                 float active_lines)
+{
+    float thr_avg, reduction;
+
+    thr_avg = powf(2.0f, (a - pe) / (4.0f * active_lines));
+    reduction = powf(2.0f, (a - desired_pe) / (4.0f * active_lines)) - thr_avg;
+
+    return FFMAX(reduction, 0.0f);
+}
+
+/**
+ * Apply a reduction value to one band's threshold.
+ *
+ * Does not write band->thr itself (callers assign the return value), but may
+ * promote band->avoid_holes to PSY_3GPP_AH_ACTIVE when the min-SNR clamp kicks in.
+ *
+ * @param band      band providing energy/threshold/avoid_holes state
+ * @param min_snr   minimal allowed (inverse) SNR for this band
+ * @param reduction reduction value from calc_reduction_3gpp()
+ * @return the reduced threshold
+ */
+static float calc_reduced_thr_3gpp(AacPsyBand *band, float min_snr,
+                                   float reduction)
+{
+    float thr = band->thr;
+
+    if (band->energy > thr) {
+        /* reduction is applied in the fourth-root domain */
+        thr = powf(thr, 0.25f) + reduction;
+        thr = powf(thr, 4.0f);
+
+        /* This deviates from the 3GPP spec to match the reference encoder.
+         * It performs min(thr_reduced, max(thr, energy/min_snr)) only for bands
+         * that have hole avoidance on (active or inactive). It always reduces the
+         * threshold of bands with hole avoidance off.
+         */
+        if (thr > band->energy * min_snr && band->avoid_holes != PSY_3GPP_AH_NONE) {
+            thr = FFMAX(band->thr, band->energy * min_snr);
+            band->avoid_holes = PSY_3GPP_AH_ACTIVE;
+        }
+    }
+
+    return thr;
+}
+
+/**
+ * Calculate band thresholds as suggested in 3GPP TS26.403
+ *
+ * Computes per-band energies and masking thresholds for one channel, then
+ * iteratively reduces the thresholds until the perceptual entropy (PE) fits
+ * the bit budget returned by calc_bit_demand().  Final thresholds/energies
+ * are exported to ctx->psy_bands and this frame's bands are saved as
+ * prev_band for next frame's pre-echo control.
+ */
+static void psy_3gpp_analyze(FFPsyContext *ctx, int channel,
+                             const float *coefs, const FFPsyWindowInfo *wi)
+{
+    AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
+    AacPsyChannel *pch = &pctx->ch[channel];
+    int start = 0;
+    int i, w, g;
+    float desired_bits, desired_pe, delta_pe, reduction, spread_en[128] = {0};
+    float a = 0.0f, active_lines = 0.0f, norm_fac = 0.0f;
+    /* constant PE offset for low channel bitrates */
+    float pe = pctx->chan_bitrate > 32000 ? 0.0f : FFMAX(50.0f, 100.0f - pctx->chan_bitrate * 100.0f / 32000.0f);
+    const int num_bands = ctx->num_bands[wi->num_windows == 8];
+    const uint8_t *band_sizes = ctx->bands[wi->num_windows == 8];
+    AacPsyCoeffs *coeffs = pctx->psy_coef[wi->num_windows == 8];
+    const float avoid_hole_thr = wi->num_windows == 8 ? PSY_3GPP_AH_THR_SHORT : PSY_3GPP_AH_THR_LONG;
+
+    //calculate energies, initial thresholds and related values - 5.4.2 "Threshold Calculation"
+    /* bands are laid out 16 per window, hence w stepping by 16 */
+    for (w = 0; w < wi->num_windows*16; w += 16) {
+        for (g = 0; g < num_bands; g++) {
+            AacPsyBand *band = &pch->band[w+g];
+
+            float form_factor = 0.0f;
+            band->energy = 0.0f;
+            for (i = 0; i < band_sizes[g]; i++) {
+                band->energy += coefs[start+i] * coefs[start+i];
+                form_factor += sqrtf(fabs(coefs[start+i]));
+            }
+            /* initial threshold: band energy scaled down by ~29 dB */
+            band->thr = band->energy * 0.001258925f;
+            /* form-factor based estimate of the number of relevant spectral lines */
+            band->nz_lines = form_factor / powf(band->energy / band_sizes[g], 0.25f);
+
+            start += band_sizes[g];
+        }
+    }
+    //modify thresholds and energies - spread, threshold in quiet, pre-echo control
+    for (w = 0; w < wi->num_windows*16; w += 16) {
+        AacPsyBand *bands = &pch->band[w];
+
+        //5.4.2.3 "Spreading" & 5.4.3 "Spreaded Energy Calculation"
+        /* NOTE(review): only spread_en[0] is seeded; for w > 0 the window's
+         * first element spread_en[w] keeps its {0} initializer, so the FFMAX
+         * below falls back to the band energy there — confirm against the
+         * reference encoder. */
+        spread_en[0] = bands[0].energy;
+        for (g = 1; g < num_bands; g++) {
+            bands[g].thr = FFMAX(bands[g].thr, bands[g-1].thr * coeffs[g].spread_hi[0]);
+            spread_en[w+g] = FFMAX(bands[g].energy, spread_en[w+g-1] * coeffs[g].spread_hi[1]);
+        }
+        for (g = num_bands - 2; g >= 0; g--) {
+            bands[g].thr = FFMAX(bands[g].thr, bands[g+1].thr * coeffs[g].spread_low[0]);
+            spread_en[w+g] = FFMAX(spread_en[w+g], spread_en[w+g+1] * coeffs[g].spread_low[1]);
+        }
+        //5.4.2.4 "Threshold in quiet"
+        for (g = 0; g < num_bands; g++) {
+            AacPsyBand *band = &bands[g];
+
+            band->thr_quiet = band->thr = FFMAX(band->thr, coeffs[g].ath);
+            //5.4.2.5 "Pre-echo control"
+            if (!(wi->window_type[0] == LONG_STOP_SEQUENCE || (wi->window_type[1] == LONG_START_SEQUENCE && !w)))
+                band->thr = FFMAX(PSY_3GPP_RPEMIN*band->thr, FFMIN(band->thr,
+                                  PSY_3GPP_RPELEV*pch->prev_band[w+g].thr_quiet));
+
+            /* 5.6.1.3.1 "Preparatory steps of the perceptual entropy calculation" */
+            pe += calc_pe_3gpp(band);
+            a += band->pe_const;
+            active_lines += band->active_lines;
+
+            /* 5.6.1.3.3 "Selection of the bands for avoidance of holes" */
+            if (spread_en[w+g] * avoid_hole_thr > band->energy || coeffs[g].min_snr > 1.0f)
+                band->avoid_holes = PSY_3GPP_AH_NONE;
+            else
+                band->avoid_holes = PSY_3GPP_AH_INACTIVE;
+        }
+    }
+
+    /* 5.6.1.3.2 "Calculation of the desired perceptual entropy" */
+    ctx->pe[channel] = pe;
+    desired_bits = calc_bit_demand(pctx, pe, ctx->bitres.bits, ctx->bitres.size, wi->num_windows == 8);
+    desired_pe = PSY_3GPP_BITS_TO_PE(desired_bits);
+    /* NOTE: PE correction is kept simple. During initial testing it had very
+     * little effect on the final bitrate. Probably a good idea to come
+     * back and do more testing later.
+     */
+    if (ctx->bitres.bits > 0)
+        desired_pe *= av_clipf(pctx->pe.previous / PSY_3GPP_BITS_TO_PE(ctx->bitres.bits),
+                               0.85f, 1.15f);
+    pctx->pe.previous = PSY_3GPP_BITS_TO_PE(desired_bits);
+
+    if (desired_pe < pe) {
+        /* 5.6.1.3.4 "First Estimation of the reduction value" */
+        for (w = 0; w < wi->num_windows*16; w += 16) {
+            reduction = calc_reduction_3gpp(a, desired_pe, pe, active_lines);
+            pe = 0.0f;
+            a = 0.0f;
+            active_lines = 0.0f;
+            for (g = 0; g < num_bands; g++) {
+                AacPsyBand *band = &pch->band[w+g];
+
+                band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
+                /* recalculate PE */
+                pe += calc_pe_3gpp(band);
+                a += band->pe_const;
+                active_lines += band->active_lines;
+            }
+        }
+
+        /* 5.6.1.3.5 "Second Estimation of the reduction value" */
+        for (i = 0; i < 2; i++) {
+            /* re-run the estimate while excluding bands already clamped by hole avoidance */
+            float pe_no_ah = 0.0f, desired_pe_no_ah;
+            active_lines = a = 0.0f;
+            for (w = 0; w < wi->num_windows*16; w += 16) {
+                for (g = 0; g < num_bands; g++) {
+                    AacPsyBand *band = &pch->band[w+g];
+
+                    if (band->avoid_holes != PSY_3GPP_AH_ACTIVE) {
+                        pe_no_ah += band->pe;
+                        a += band->pe_const;
+                        active_lines += band->active_lines;
+                    }
+                }
+            }
+            desired_pe_no_ah = FFMAX(desired_pe - (pe - pe_no_ah), 0.0f);
+            if (active_lines > 0.0f)
+                reduction += calc_reduction_3gpp(a, desired_pe_no_ah, pe_no_ah, active_lines);
+
+            pe = 0.0f;
+            for (w = 0; w < wi->num_windows*16; w += 16) {
+                for (g = 0; g < num_bands; g++) {
+                    AacPsyBand *band = &pch->band[w+g];
+
+                    if (active_lines > 0.0f)
+                        band->thr = calc_reduced_thr_3gpp(band, coeffs[g].min_snr, reduction);
+                    pe += calc_pe_3gpp(band);
+                    band->norm_fac = band->active_lines / band->thr;
+                    norm_fac += band->norm_fac;
+                }
+            }
+            delta_pe = desired_pe - pe;
+            /* NOTE(review): this exits when the remaining PE error is LARGE
+             * (> 5%), i.e. refinement stops early on big errors — confirm the
+             * intended condition against the reference encoder. */
+            if (fabs(delta_pe) > 0.05f * desired_pe)
+                break;
+        }
+
+        if (pe < 1.15f * desired_pe) {
+            /* 5.6.1.3.6 "Final threshold modification by linearization" */
+            norm_fac = 1.0f / norm_fac;
+            for (w = 0; w < wi->num_windows*16; w += 16) {
+                for (g = 0; g < num_bands; g++) {
+                    AacPsyBand *band = &pch->band[w+g];
+
+                    if (band->active_lines > 0.5f) {
+                        /* distribute the remaining PE error proportionally to norm_fac */
+                        float delta_sfb_pe = band->norm_fac * norm_fac * delta_pe;
+                        float thr = band->thr;
+
+                        thr *= powf(2.0f, delta_sfb_pe / band->active_lines);
+                        if (thr > coeffs[g].min_snr * band->energy && band->avoid_holes == PSY_3GPP_AH_INACTIVE)
+                            thr = FFMAX(band->thr, coeffs[g].min_snr * band->energy);
+                        band->thr = thr;
+                    }
+                }
+            }
+        } else {
+            /* 5.6.1.3.7 "Further perceptual entropy reduction" */
+            /* relax min SNR band by band, from the highest band downwards */
+            g = num_bands;
+            while (pe > desired_pe && g--) {
+                for (w = 0; w < wi->num_windows*16; w+= 16) {
+                    AacPsyBand *band = &pch->band[w+g];
+                    if (band->avoid_holes != PSY_3GPP_AH_NONE && coeffs[g].min_snr < PSY_SNR_1DB) {
+                        coeffs[g].min_snr = PSY_SNR_1DB;
+                        band->thr = band->energy * PSY_SNR_1DB;
+                        pe += band->active_lines * 1.5f - band->pe;
+                    }
+                }
+            }
+            /* TODO: allow more holes (unused without mid/side) */
+        }
+    }
+
+    /* export the final thresholds and energies to the shared psy band array */
+    for (w = 0; w < wi->num_windows*16; w += 16) {
+        for (g = 0; g < num_bands; g++) {
+            AacPsyBand *band = &pch->band[w+g];
+            FFPsyBand *psy_band = &ctx->psy_bands[channel*PSY_MAX_BANDS+w+g];
+
+            psy_band->threshold = band->thr;
+            psy_band->energy = band->energy;
+        }
+    }
+
+    /* keep this frame's bands for next frame's pre-echo control */
+    memcpy(pch->prev_band, pch->band, sizeof(pch->band));
+}
+
+/**
+ * Free the model's private data: the per-channel array, then the context itself.
+ */
+static av_cold void psy_3gpp_end(FFPsyContext *apc)
+{
+    AacPsyContext *pctx = (AacPsyContext*) apc->model_priv_data;
+    av_freep(&pctx->ch);
+    av_freep(&apc->model_priv_data);
+}
+
+/**
+ * Update the channel's window-sequence state machine, LAME style.
+ *
+ * The sequence emitted for the current frame was queued one frame ago in
+ * ctx->next_window_seq; this call outputs it into wi and queues the sequence
+ * implied by uselongblock for the next frame, fixing up illegal transitions.
+ *
+ * @param ctx          per-channel psy state
+ * @param wi           window info to fill (only window_type[0] is written)
+ * @param uselongblock nonzero if the new frame should use a long window
+ */
+static void lame_apply_block_type(AacPsyChannel *ctx, FFPsyWindowInfo *wi, int uselongblock)
+{
+    int blocktype = ONLY_LONG_SEQUENCE;
+    if (uselongblock) {
+        /* returning to long windows after shorts requires a LONG_STOP transition */
+        if (ctx->next_window_seq == EIGHT_SHORT_SEQUENCE)
+            blocktype = LONG_STOP_SEQUENCE;
+    } else {
+        blocktype = EIGHT_SHORT_SEQUENCE;
+        /* repair the queued sequence so the long->short transition stays legal */
+        if (ctx->next_window_seq == ONLY_LONG_SEQUENCE)
+            ctx->next_window_seq = LONG_START_SEQUENCE;
+        if (ctx->next_window_seq == LONG_STOP_SEQUENCE)
+            ctx->next_window_seq = EIGHT_SHORT_SEQUENCE;
+    }
+
+    wi->window_type[0] = ctx->next_window_seq;
+    ctx->next_window_seq = blocktype;
+}
+
+/**
+ * LAME-style window switching: high-pass filter the lookahead, measure
+ * energy jumps ("attacks") between sub-short blocks and decide between long
+ * and short windows, one frame delayed via lame_apply_block_type().
+ *
+ * @param ctx       psychoacoustic model context
+ * @param audio     current frame samples (unused here)
+ * @param la        lookahead samples, or NULL when no lookahead is available
+ * @param channel   channel index
+ * @param prev_type window sequence used for the previous frame
+ * @return filled window info (window types, shape and short-window grouping)
+ */
+static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx,
+                                       const int16_t *audio, const int16_t *la,
+                                       int channel, int prev_type)
+{
+    AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data;
+    AacPsyChannel *pch = &pctx->ch[channel];
+    int grouping = 0;
+    int uselongblock = 1;
+    int attacks[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
+    int i;
+    FFPsyWindowInfo wi;
+
+    memset(&wi, 0, sizeof(wi));
+    if (la) {
+        float hpfsmpl[AAC_BLOCK_SIZE_LONG];
+        float const *pf = hpfsmpl;
+        float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
+        float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
+        float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
+        int chans = ctx->avctx->channels;
+        /* back up so the FIR has PSY_LAME_FIR_LEN samples of history (interleaved input) */
+        const int16_t *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans;
+        int j, att_sum = 0;
+
+        /* LAME comment: apply high pass filter of fs/4 */
+        /* symmetric FIR: sum1/sum2 fold mirrored taps, two coefficients per iteration */
+        for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) {
+            float sum1, sum2;
+            sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans];
+            sum2 = 0.0;
+            for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) {
+                sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]);
+                sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]);
+            }
+            hpfsmpl[i] = sum1 + sum2;
+        }
+
+        /* Calculate the energies of each sub-shortblock */
+        /* seed from the tail of the previous frame so attack ratios span the frame boundary;
+         * the assert assumes the previous energies were initialized to a positive value
+         * (presumably by lame_window_init — confirm) */
+        for (i = 0; i < PSY_LAME_NUM_SUBBLOCKS; i++) {
+            energy_subshort[i] = pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 1) * PSY_LAME_NUM_SUBBLOCKS)];
+            assert(pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 2) * PSY_LAME_NUM_SUBBLOCKS + 1)] > 0);
+            attack_intensity[i] = energy_subshort[i] / pch->prev_energy_subshort[i + ((AAC_NUM_BLOCKS_SHORT - 2) * PSY_LAME_NUM_SUBBLOCKS + 1)];
+            energy_short[0] += energy_subshort[i];
+        }
+
+        for (i = 0; i < AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS; i++) {
+            float const *const pfe = pf + AAC_BLOCK_SIZE_LONG / (AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS);
+            float p = 1.0f;
+            /* peak absolute filtered sample within the sub-short block, floored at 1.0 */
+            for (; pf < pfe; pf++)
+                if (p < fabsf(*pf))
+                    p = fabsf(*pf);
+            pch->prev_energy_subshort[i] = energy_subshort[i + PSY_LAME_NUM_SUBBLOCKS] = p;
+            energy_short[1 + i / PSY_LAME_NUM_SUBBLOCKS] += p;
+            /* FIXME: The indexes below are [i + 3 - 2] in the LAME source.
+             * Obviously the 3 and 2 have some significance, or this would be just [i + 1]
+             * (which is what we use here). What the 3 stands for is ambiguous, as it is both
+             * number of short blocks, and the number of sub-short blocks.
+             * It seems that LAME is comparing each sub-block to sub-block + 1 in the
+             * previous block.
+             */
+            if (p > energy_subshort[i + 1])
+                p = p / energy_subshort[i + 1];
+            else if (energy_subshort[i + 1] > p * 10.0f)
+                p = energy_subshort[i + 1] / (p * 10.0f);
+            else
+                p = 0.0;
+            attack_intensity[i + PSY_LAME_NUM_SUBBLOCKS] = p;
+        }
+
+        /* compare energy between sub-short blocks */
+        for (i = 0; i < (AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS; i++)
+            if (!attacks[i / PSY_LAME_NUM_SUBBLOCKS])
+                if (attack_intensity[i] > pch->attack_threshold)
+                    attacks[i / PSY_LAME_NUM_SUBBLOCKS] = (i % PSY_LAME_NUM_SUBBLOCKS) + 1;
+
+        /* should have energy change between short blocks, in order to avoid periodic signals */
+        /* Good samples to show the effect are Trumpet test songs */
+        /* GB: tuned (1) to avoid too many short blocks for test sample TRUMPET */
+        /* RH: tuned (2) to let enough short blocks through for test sample FSOL and SNAPS */
+        for (i = 1; i < AAC_NUM_BLOCKS_SHORT + 1; i++) {
+            float const u = energy_short[i - 1];
+            float const v = energy_short[i];
+            float const m = FFMAX(u, v);
+            if (m < 40000) { /* (2) */
+                if (u < 1.7f * v && v < 1.7f * u) { /* (1) */
+                    if (i == 1 && attacks[0] < attacks[i])
+                        attacks[0] = 0;
+                    attacks[i] = 0;
+                }
+            }
+            att_sum += attacks[i];
+        }
+
+        /* an attack at the frame start already handled last frame is dropped */
+        if (attacks[0] <= pch->prev_attack)
+            attacks[0] = 0;
+
+        att_sum += attacks[0];
+        /* 3 below indicates the previous attack happened in the last sub-block of the previous sequence */
+        if (pch->prev_attack == 3 || att_sum) {
+            uselongblock = 0;
+
+            /* drop back-to-back attacks: the earlier one already forces short windows */
+            for (i = 1; i < AAC_NUM_BLOCKS_SHORT + 1; i++)
+                if (attacks[i] && attacks[i-1])
+                    attacks[i] = 0;
+        }
+    } else {
+        /* We have no lookahead info, so just use same type as the previous sequence. */
+        uselongblock = !(prev_type == EIGHT_SHORT_SEQUENCE);
+    }
+
+    lame_apply_block_type(pch, &wi, uselongblock);
+
+    wi.window_type[1] = prev_type;
+    if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) {
+        wi.num_windows = 1;
+        wi.grouping[0] = 1;
+        if (wi.window_type[0] == LONG_START_SEQUENCE)
+            wi.window_shape = 0;
+        else
+            wi.window_shape = 1;
+    } else {
+        int lastgrp = 0;
+
+        /* expand the grouping bitmask: a 0 bit starts a new group */
+        wi.num_windows = 8;
+        wi.window_shape = 0;
+        for (i = 0; i < 8; i++) {
+            if (!((pch->next_grouping >> i) & 1))
+                lastgrp = i;
+            wi.grouping[lastgrp]++;
+        }
+    }
+
+    /* Determine grouping, based on the location of the first attack, and save for
+     * the next frame.
+     * FIXME: Move this to analysis.
+     * TODO: Tune groupings depending on attack location
+     * TODO: Handle more than one attack in a group
+     */
+    for (i = 0; i < 9; i++) {
+        if (attacks[i]) {
+            grouping = i;
+            break;
+        }
+    }
+    pch->next_grouping = window_grouping[grouping];
+
+    pch->prev_attack = attacks[8];
+
+    return wi;
+}
+
+/**
+ * 3GPP TS 26.403 based psychoacoustic model with LAME-style block switching.
+ * NOTE(review): .window points to psy_lame_window, so psy_3gpp_window above
+ * is currently unused by this model.
+ */
+const FFPsyModel ff_aac_psy_model =
+{
+    .name = "3GPP TS 26.403-inspired model",
+    .init = psy_3gpp_init,
+    .window = psy_lame_window,
+    .analyze = psy_3gpp_analyze,
+    .end = psy_3gpp_end,
+};
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.h b/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.h
new file mode 100755
index 0000000..05c93cd
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacpsy.h
@@ -0,0 +1,50 @@
+/*
+ * AAC encoder psychoacoustic model
+ * Copyright (C) 2008 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AACPSY_H
+#define AVCODEC_AACPSY_H
+
+#include "avcodec.h"
+#include "aac.h"
+//#include "lowpass.h"
+
+/**
+ * Available psychoacoustic model types.
+ */
+enum AACPsyModelType{
+    AAC_PSY_TEST,              ///< a sample model to exercise encoder
+    AAC_PSY_3GPP,              ///< model following recommendations from 3GPP TS 26.403
+
+    AAC_NB_PSY_MODELS          ///< total number of psychoacoustic models, since it's not a part of the ABI new models can be added freely
+};
+
+/**
+ * context used by psychoacoustic model
+ *
+ * NOTE(review): distinct from the (differently capitalized) AacPsyContext
+ * defined privately inside aacpsy.c — easy to confuse.
+ */
+typedef struct AACPsyContext {
+    AVCodecContext *avctx; ///< encoder context
+}AACPsyContext;
+
+/**
+ * Cleanup model context at the end.
+ *
+ * NOTE(review): no definition of ff_aac_psy_end is visible in aacpsy.c here
+ * (teardown goes through FFPsyModel.end = psy_3gpp_end); confirm this
+ * prototype is still needed.
+ *
+ * @param ctx model context
+ */
+void ff_aac_psy_end(AACPsyContext *ctx);
+
+#endif /* AVCODEC_AACPSY_H */
diff --git a/samples/rtsp_player/ffmpeg/libavcodec/aacsbr.c b/samples/rtsp_player/ffmpeg/libavcodec/aacsbr.c
new file mode 100755
index 0000000..6ac2cbc
--- /dev/null
+++ b/samples/rtsp_player/ffmpeg/libavcodec/aacsbr.c
@@ -0,0 +1,1761 @@
+/*
+ * AAC Spectral Band Replication decoding functions
+ * Copyright (c) 2008-2009 Robert Swain ( rob opendot cl )
+ * Copyright (c) 2009-2010 Alex Converse <alex.converse@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC Spectral Band Replication decoding functions
+ * @author Robert Swain ( rob opendot cl )
+ */
+
+#include "aac.h"
+#include "sbr.h"
+#include "aacsbr.h"
+#include "aacsbrdata.h"
+#include "fft.h"
+#include "aacps.h"
+
+#include <stdint.h>
+#include <float.h>
+#include <math.h>
+
+#define ENVELOPE_ADJUSTMENT_OFFSET 2
+#define NOISE_FLOOR_OFFSET 6.0f
+
+/**
+ * SBR VLC tables
+ *
+ * Identifier naming (inferred from the names — confirm against aacsbrdata.h):
+ * T_/F_ = time/frequency coding direction, ENV/NOISE = envelope vs. noise
+ * floor data, BAL = balance (channel pair) variant, 1_5DB/3_0DB =
+ * quantization step size.
+ */
+enum {
+    T_HUFFMAN_ENV_1_5DB,
+    F_HUFFMAN_ENV_1_5DB,
+    T_HUFFMAN_ENV_BAL_1_5DB,
+    F_HUFFMAN_ENV_BAL_1_5DB,
+    T_HUFFMAN_ENV_3_0DB,
+    F_HUFFMAN_ENV_3_0DB,
+    T_HUFFMAN_ENV_BAL_3_0DB,
+    F_HUFFMAN_ENV_BAL_3_0DB,
+    T_HUFFMAN_NOISE_3_0DB,
+    T_HUFFMAN_NOISE_BAL_3_0DB,
+};
+
+/**
+ * bs_frame_class - frame class of current SBR frame (14496-3 sp04 p98)