diff --git a/Limelight-iOS.xcodeproj/project.pbxproj b/Limelight-iOS.xcodeproj/project.pbxproj
new file mode 100644
index 0000000..4c3fc83
--- /dev/null
+++ b/Limelight-iOS.xcodeproj/project.pbxproj
@@ -0,0 +1,4800 @@
+// !$*UTF8*$!
+{
+ archiveVersion = 1;
+ classes = {
+ };
+ objectVersion = 46;
+ objects = {
+
+/* Begin PBXBuildFile section */
+ FB05E4BE1896D83700E0CAC7 /* ConnectionHandler.m in Sources */ = {isa = PBXBuildFile; fileRef = FB05E4BD1896D83700E0CAC7 /* ConnectionHandler.m */; };
+ FB05E4C01899730B00E0CAC7 /* AdSupport.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FB05E4BF1899730B00E0CAC7 /* AdSupport.framework */; };
+ FB05E4C51899EC7F00E0CAC7 /* libxml2.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = FB05E4C11899E81500E0CAC7 /* libxml2.dylib */; };
+ FB4E9460188BB80700CCC583 /* Connection.m in Sources */ = {isa = PBXBuildFile; fileRef = FB4E945F188BB80700CCC583 /* Connection.m */; };
+ FB4E948B188BCC2900CCC583 /* VideoRenderer.m in Sources */ = {isa = PBXBuildFile; fileRef = FB4E948A188BCC2900CCC583 /* VideoRenderer.m */; };
+ FBBC0240188E13A800CEB8E5 /* liblimelight-common.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FB4E945C188BB6FA00CCC583 /* liblimelight-common.a */; };
+ FBBC53A5188ACD01004D2BA0 /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53A4188ACD01004D2BA0 /* libz.dylib */; };
+ FBBC53A8188AD1D3004D2BA0 /* StreamFrameViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = FBBC53A7188AD1D3004D2BA0 /* StreamFrameViewController.m */; };
+ FBBC53AE188AE27D004D2BA0 /* StreamView.m in Sources */ = {isa = PBXBuildFile; fileRef = FBBC53AD188AE27D004D2BA0 /* StreamView.m */; };
+ FBBC53E4188BA7C7004D2BA0 /* libopus.la in Resources */ = {isa = PBXBuildFile; fileRef = FBBC53D4188BA7C7004D2BA0 /* libopus.la */; };
+ FBBC53E5188BA7C7004D2BA0 /* opus.pc in Resources */ = {isa = PBXBuildFile; fileRef = FBBC53D6188BA7C7004D2BA0 /* opus.pc */; };
+ FBBC53E7188BA7C7004D2BA0 /* libopus.la in Resources */ = {isa = PBXBuildFile; fileRef = FBBC53E0188BA7C7004D2BA0 /* libopus.la */; };
+ FBBC53E8188BA7C7004D2BA0 /* opus.pc in Resources */ = {isa = PBXBuildFile; fileRef = FBBC53E2188BA7C7004D2BA0 /* opus.pc */; };
+ FBBC53F1188BA7FC004D2BA0 /* libavcodec.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53E9188BA7FC004D2BA0 /* libavcodec.a */; };
+ FBBC53F2188BA7FC004D2BA0 /* libavdevice.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53EA188BA7FC004D2BA0 /* libavdevice.a */; };
+ FBBC53F3188BA7FC004D2BA0 /* libavfilter.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53EB188BA7FC004D2BA0 /* libavfilter.a */; };
+ FBBC53F4188BA7FC004D2BA0 /* libavformat.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53EC188BA7FC004D2BA0 /* libavformat.a */; };
+ FBBC53F5188BA7FC004D2BA0 /* libavresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53ED188BA7FC004D2BA0 /* libavresample.a */; };
+ FBBC53F6188BA7FC004D2BA0 /* libavutil.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53EE188BA7FC004D2BA0 /* libavutil.a */; };
+ FBBC53F7188BA7FC004D2BA0 /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53EF188BA7FC004D2BA0 /* libswresample.a */; };
+ FBBC53F8188BA7FC004D2BA0 /* libswscale.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53F0188BA7FC004D2BA0 /* libswscale.a */; };
+ FBBC53F9188BA82E004D2BA0 /* libopus.a in Frameworks */ = {isa = PBXBuildFile; fileRef = FBBC53D3188BA7C7004D2BA0 /* libopus.a */; };
+ FBC18AE3188A2AB500D5D34E /* MainFrame.storyboard in
Resources */ = {isa = PBXBuildFile; fileRef = FBC18AE2188A2AB500D5D34E /* MainFrame.storyboard */; }; + FBC18B2B188A3B9100D5D34E /* MainFrameViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = FBC18B2A188A3B9100D5D34E /* MainFrameViewController.m */; }; + FBC18B2F188A4E0500D5D34E /* VideoDepacketizer.m in Sources */ = {isa = PBXBuildFile; fileRef = FBC18B2E188A4E0500D5D34E /* VideoDepacketizer.m */; }; + FBC18B32188A5A1100D5D34E /* VideoDecoder.m in Sources */ = {isa = PBXBuildFile; fileRef = FBC18B31188A5A1100D5D34E /* VideoDecoder.m */; }; + FBF6AE75188A274100B50578 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE74188A274100B50578 /* Foundation.framework */; }; + FBF6AE77188A274100B50578 /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE76188A274100B50578 /* CoreGraphics.framework */; }; + FBF6AE79188A274100B50578 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE78188A274100B50578 /* UIKit.framework */; }; + FBF6AE7B188A274100B50578 /* CoreData.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE7A188A274100B50578 /* CoreData.framework */; }; + FBF6AE83188A274100B50578 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = FBF6AE82188A274100B50578 /* main.m */; }; + FBF6AE87188A274100B50578 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = FBF6AE86188A274100B50578 /* AppDelegate.m */; }; + FBF6AE8A188A274100B50578 /* Limelight_iOS.xcdatamodeld in Sources */ = {isa = PBXBuildFile; fileRef = FBF6AE88188A274100B50578 /* Limelight_iOS.xcdatamodeld */; }; + FBF6AE8C188A274100B50578 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = FBF6AE8B188A274100B50578 /* Images.xcassets */; }; + FBF6AE93188A274100B50578 /* XCTest.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE92188A274100B50578 /* XCTest.framework */; }; + FBF6AE94188A274100B50578 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE74188A274100B50578 /* Foundation.framework */; }; + FBF6AE95188A274100B50578 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE78188A274100B50578 /* UIKit.framework */; }; + FBF6AE96188A274100B50578 /* CoreData.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FBF6AE7A188A274100B50578 /* CoreData.framework */; }; + FBF6AE9E188A274100B50578 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = FBF6AE9C188A274100B50578 /* InfoPlist.strings */; }; + FBF6AEA0188A274100B50578 /* Limelight_iOSTests.m in Sources */ = {isa = PBXBuildFile; fileRef = FBF6AE9F188A274100B50578 /* Limelight_iOSTests.m */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + FBF6AE97188A274100B50578 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = FBF6AE69188A274100B50578 /* Project object */; + proxyType = 1; + remoteGlobalIDString = FBF6AE70188A274100B50578; + remoteInfo = "Limelight-iOS"; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + FB05E4BC1896D83700E0CAC7 /* ConnectionHandler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConnectionHandler.h; sourceTree = ""; }; + FB05E4BD1896D83700E0CAC7 /* ConnectionHandler.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = ConnectionHandler.m; sourceTree = ""; }; + FB05E4BF1899730B00E0CAC7 /* AdSupport.framework */ = {isa = PBXFileReference; lastKnownFileType = 
wrapper.framework; name = AdSupport.framework; path = System/Library/Frameworks/AdSupport.framework; sourceTree = SDKROOT; }; + FB05E4C11899E81500E0CAC7 /* libxml2.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libxml2.dylib; path = usr/lib/libxml2.dylib; sourceTree = SDKROOT; }; + FB05E4C31899EC4100E0CAC7 /* libxml2.2.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libxml2.2.dylib; path = usr/lib/libxml2.2.dylib; sourceTree = SDKROOT; }; + FB272728188A8A000093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavcodec.a; path = "../ffmpeg-built/universal/lib/libavcodec.a"; sourceTree = ""; }; + FB27272A188A8B3C0093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavcodec.a; path = "Limelight-iOS/libs/armv7s/lib/libavcodec.a"; sourceTree = ""; }; + FB27272B188A8B3C0093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavdevice.a; path = "Limelight-iOS/libs/armv7s/lib/libavdevice.a"; sourceTree = ""; }; + FB27272C188A8B3C0093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavfilter.a; path = "Limelight-iOS/libs/armv7s/lib/libavfilter.a"; sourceTree = ""; }; + FB27272D188A8B3C0093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavformat.a; path = "Limelight-iOS/libs/armv7s/lib/libavformat.a"; sourceTree = ""; }; + FB27272E188A8B3C0093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavresample.a; path = "Limelight-iOS/libs/armv7s/lib/libavresample.a"; sourceTree = ""; }; + FB27272F188A8B3C0093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavutil.a; path = "Limelight-iOS/libs/armv7s/lib/libavutil.a"; sourceTree = ""; }; + FB272730188A8B3C0093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = "Limelight-iOS/libs/armv7s/lib/libswresample.a"; sourceTree = ""; }; + FB272731188A8B3C0093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswscale.a; path = "Limelight-iOS/libs/armv7s/lib/libswscale.a"; sourceTree = ""; }; + FB27273F188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272740188A8BB20093CCC9 /* avfft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272741188A8BB20093CCC9 /* dxva2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272742188A8BB20093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272743188A8BB20093CCC9 /* vaapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB272744188A8BB20093CCC9 /* vda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB272745188A8BB20093CCC9 /* vdpau.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB272746188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272747188A8BB20093CCC9 /* xvmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB272749188A8BB20093CCC9 /* avdevice.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB27274A188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27274C188A8BB20093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB27274D188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB27274E188A8BB20093CCC9 /* avfilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB27274F188A8BB20093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272750188A8BB20093CCC9 /* buffersink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB272751188A8BB20093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272752188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272754188A8BB20093CCC9 /* avformat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB272755188A8BB20093CCC9 /* avio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB272756188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272758188A8BB20093CCC9 /* avresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB272759188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27275B188A8BB20093CCC9 /* adler32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB27275C188A8BB20093CCC9 /* aes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB27275D188A8BB20093CCC9 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB27275E188A8BB20093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB27275F188A8BB20093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB272760188A8BB20093CCC9 /* avassert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB272761188A8BB20093CCC9 /* 
avconfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB272762188A8BB20093CCC9 /* avstring.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB272763188A8BB20093CCC9 /* avutil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB272764188A8BB20093CCC9 /* base64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB272765188A8BB20093CCC9 /* blowfish.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB272766188A8BB20093CCC9 /* bprint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB272767188A8BB20093CCC9 /* bswap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB272768188A8BB20093CCC9 /* buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB272769188A8BB20093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB27276A188A8BB20093CCC9 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB27276B188A8BB20093CCC9 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB27276C188A8BB20093CCC9 /* crc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB27276D188A8BB20093CCC9 /* dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB27276E188A8BB20093CCC9 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB27276F188A8BB20093CCC9 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB272770188A8BB20093CCC9 /* fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB272771188A8BB20093CCC9 /* file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB272772188A8BB20093CCC9 /* frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB272773188A8BB20093CCC9 /* hmac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB272774188A8BB20093CCC9 /* imgutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB272775188A8BB20093CCC9 /* intfloat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB272776188A8BB20093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB272777188A8BB20093CCC9 /* 
intreadwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB272778188A8BB20093CCC9 /* lfg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB272779188A8BB20093CCC9 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB27277A188A8BB20093CCC9 /* lzo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB27277B188A8BB20093CCC9 /* mathematics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB27277C188A8BB20093CCC9 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB27277D188A8BB20093CCC9 /* mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB27277E188A8BB20093CCC9 /* murmur3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB27277F188A8BB20093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB272780188A8BB20093CCC9 /* opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB272781188A8BB20093CCC9 /* parseutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB272782188A8BB20093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB272783188A8BB20093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB272784188A8BB20093CCC9 /* random_seed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB272785188A8BB20093CCC9 /* rational.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB272786188A8BB20093CCC9 /* ripemd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB272787188A8BB20093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB272788188A8BB20093CCC9 /* sha.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB272789188A8BB20093CCC9 /* sha512.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB27278A188A8BB20093CCC9 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB27278B188A8BB20093CCC9 /* timecode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB27278C188A8BB20093CCC9 /* timestamp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB27278D188A8BB20093CCC9 /* 
version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27278E188A8BB20093CCC9 /* xtea.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB272790188A8BB20093CCC9 /* swresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB272791188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272793188A8BB20093CCC9 /* swscale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB272794188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272796188A8BB20093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB272797188A8BB20093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB272798188A8BB20093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB272799188A8BB20093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB27279A188A8BB20093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB27279B188A8BB20093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB27279C188A8BB20093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB27279D188A8BB20093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB27279F188A8BB20093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB2727A0188A8BB20093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FB2727A1188A8BB20093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB2727A2188A8BB20093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB2727A3188A8BB20093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB2727A4188A8BB20093CCC9 /* libavutil.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB2727A5188A8BB20093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB2727A6188A8BB20093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB2727AA188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
avcodec.h; sourceTree = ""; }; + FB2727AB188A8BB20093CCC9 /* avfft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB2727AC188A8BB20093CCC9 /* dxva2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB2727AD188A8BB20093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB2727AE188A8BB20093CCC9 /* vaapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB2727AF188A8BB20093CCC9 /* vda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB2727B0188A8BB20093CCC9 /* vdpau.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB2727B1188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727B2188A8BB20093CCC9 /* xvmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB2727B4188A8BB20093CCC9 /* avdevice.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB2727B5188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727B7188A8BB20093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB2727B8188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB2727B9188A8BB20093CCC9 /* avfilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB2727BA188A8BB20093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB2727BB188A8BB20093CCC9 /* buffersink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB2727BC188A8BB20093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB2727BD188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727BF188A8BB20093CCC9 /* avformat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB2727C0188A8BB20093CCC9 /* avio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB2727C1188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727C3188A8BB20093CCC9 /* avresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB2727C4188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727C6188A8BB20093CCC9 /* adler32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB2727C7188A8BB20093CCC9 /* aes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB2727C8188A8BB20093CCC9 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB2727C9188A8BB20093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB2727CA188A8BB20093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB2727CB188A8BB20093CCC9 /* avassert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB2727CC188A8BB20093CCC9 /* avconfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB2727CD188A8BB20093CCC9 /* avstring.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB2727CE188A8BB20093CCC9 /* avutil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB2727CF188A8BB20093CCC9 /* base64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB2727D0188A8BB20093CCC9 /* blowfish.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB2727D1188A8BB20093CCC9 /* bprint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB2727D2188A8BB20093CCC9 /* bswap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB2727D3188A8BB20093CCC9 /* buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB2727D4188A8BB20093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB2727D5188A8BB20093CCC9 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB2727D6188A8BB20093CCC9 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB2727D7188A8BB20093CCC9 /* crc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB2727D8188A8BB20093CCC9 /* dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB2727D9188A8BB20093CCC9 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB2727DA188A8BB20093CCC9 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB2727DB188A8BB20093CCC9 /* fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB2727DC188A8BB20093CCC9 /* file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB2727DD188A8BB20093CCC9 /* frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB2727DE188A8BB20093CCC9 /* hmac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB2727DF188A8BB20093CCC9 /* imgutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB2727E0188A8BB20093CCC9 /* intfloat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB2727E1188A8BB20093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB2727E2188A8BB20093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB2727E3188A8BB20093CCC9 /* lfg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB2727E4188A8BB20093CCC9 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB2727E5188A8BB20093CCC9 /* lzo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB2727E6188A8BB20093CCC9 /* mathematics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB2727E7188A8BB20093CCC9 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB2727E8188A8BB20093CCC9 /* mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB2727E9188A8BB20093CCC9 /* murmur3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB2727EA188A8BB20093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB2727EB188A8BB20093CCC9 /* opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB2727EC188A8BB20093CCC9 /* parseutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB2727ED188A8BB20093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB2727EE188A8BB20093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB2727EF188A8BB20093CCC9 /* random_seed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB2727F0188A8BB20093CCC9 /* rational.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB2727F1188A8BB20093CCC9 /* ripemd.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB2727F2188A8BB20093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB2727F3188A8BB20093CCC9 /* sha.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB2727F4188A8BB20093CCC9 /* sha512.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB2727F5188A8BB20093CCC9 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB2727F6188A8BB20093CCC9 /* timecode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB2727F7188A8BB20093CCC9 /* timestamp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB2727F8188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727F9188A8BB20093CCC9 /* xtea.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB2727FB188A8BB20093CCC9 /* swresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB2727FC188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2727FE188A8BB20093CCC9 /* swscale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB2727FF188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272801188A8BB20093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB272802188A8BB20093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB272803188A8BB20093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB272804188A8BB20093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB272805188A8BB20093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB272806188A8BB20093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB272807188A8BB20093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB272808188A8BB20093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB27280A188A8BB20093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB27280B188A8BB20093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + 
FB27280C188A8BB20093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB27280D188A8BB20093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB27280E188A8BB20093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB27280F188A8BB20093CCC9 /* libavutil.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB272810188A8BB20093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB272811188A8BB20093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB272815188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272816188A8BB20093CCC9 /* avfft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272817188A8BB20093CCC9 /* dxva2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272818188A8BB20093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272819188A8BB20093CCC9 /* vaapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB27281A188A8BB20093CCC9 /* vda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB27281B188A8BB20093CCC9 /* vdpau.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB27281C188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27281D188A8BB20093CCC9 /* xvmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB27281F188A8BB20093CCC9 /* avdevice.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB272820188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272822188A8BB20093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB272823188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272824188A8BB20093CCC9 /* avfilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB272825188A8BB20093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272826188A8BB20093CCC9 /* buffersink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
buffersink.h; sourceTree = ""; }; + FB272827188A8BB20093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272828188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27282A188A8BB20093CCC9 /* avformat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB27282B188A8BB20093CCC9 /* avio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB27282C188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27282E188A8BB20093CCC9 /* avresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB27282F188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272831188A8BB20093CCC9 /* adler32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB272832188A8BB20093CCC9 /* aes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB272833188A8BB20093CCC9 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB272834188A8BB20093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB272835188A8BB20093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB272836188A8BB20093CCC9 /* avassert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB272837188A8BB20093CCC9 /* avconfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB272838188A8BB20093CCC9 /* avstring.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB272839188A8BB20093CCC9 /* avutil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB27283A188A8BB20093CCC9 /* base64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB27283B188A8BB20093CCC9 /* blowfish.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB27283C188A8BB20093CCC9 /* bprint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB27283D188A8BB20093CCC9 /* bswap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB27283E188A8BB20093CCC9 /* buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB27283F188A8BB20093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB272840188A8BB20093CCC9 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB272841188A8BB20093CCC9 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB272842188A8BB20093CCC9 /* crc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB272843188A8BB20093CCC9 /* dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB272844188A8BB20093CCC9 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB272845188A8BB20093CCC9 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB272846188A8BB20093CCC9 /* fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB272847188A8BB20093CCC9 /* file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB272848188A8BB20093CCC9 /* frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB272849188A8BB20093CCC9 /* hmac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB27284A188A8BB20093CCC9 /* imgutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB27284B188A8BB20093CCC9 /* intfloat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB27284C188A8BB20093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB27284D188A8BB20093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB27284E188A8BB20093CCC9 /* lfg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB27284F188A8BB20093CCC9 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB272850188A8BB20093CCC9 /* lzo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB272851188A8BB20093CCC9 /* mathematics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB272852188A8BB20093CCC9 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB272853188A8BB20093CCC9 /* mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB272854188A8BB20093CCC9 /* murmur3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB272855188A8BB20093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB272856188A8BB20093CCC9 /* opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB272857188A8BB20093CCC9 /* parseutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB272858188A8BB20093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB272859188A8BB20093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB27285A188A8BB20093CCC9 /* random_seed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB27285B188A8BB20093CCC9 /* rational.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB27285C188A8BB20093CCC9 /* ripemd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB27285D188A8BB20093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB27285E188A8BB20093CCC9 /* sha.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB27285F188A8BB20093CCC9 /* sha512.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB272860188A8BB20093CCC9 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB272861188A8BB20093CCC9 /* timecode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB272862188A8BB20093CCC9 /* timestamp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB272863188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272864188A8BB20093CCC9 /* xtea.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB272866188A8BB20093CCC9 /* swresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB272867188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272869188A8BB20093CCC9 /* swscale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB27286A188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27286C188A8BB20093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB27286D188A8BB20093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB27286E188A8BB20093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = 
libavfilter.a; sourceTree = ""; }; + FB27286F188A8BB20093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB272870188A8BB20093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB272871188A8BB20093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB272872188A8BB20093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB272873188A8BB20093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB272875188A8BB20093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB272876188A8BB20093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FB272877188A8BB20093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB272878188A8BB20093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB272879188A8BB20093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB27287A188A8BB20093CCC9 /* libavutil.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB27287B188A8BB20093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB27287C188A8BB20093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB272880188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272881188A8BB20093CCC9 /* avfft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272882188A8BB20093CCC9 /* dxva2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272883188A8BB20093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272884188A8BB20093CCC9 /* vaapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB272885188A8BB20093CCC9 /* vda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB272886188A8BB20093CCC9 /* vdpau.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB272887188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272888188A8BB20093CCC9 /* xvmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB27288A188A8BB20093CCC9 /* avdevice.h 
*/ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB27288B188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27288D188A8BB20093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB27288E188A8BB20093CCC9 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB27288F188A8BB20093CCC9 /* avfilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB272890188A8BB20093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272891188A8BB20093CCC9 /* buffersink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB272892188A8BB20093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272893188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272895188A8BB20093CCC9 /* avformat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB272896188A8BB20093CCC9 /* avio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB272897188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272899188A8BB20093CCC9 /* avresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB27289A188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27289C188A8BB20093CCC9 /* adler32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB27289D188A8BB20093CCC9 /* aes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB27289E188A8BB20093CCC9 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB27289F188A8BB20093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB2728A0188A8BB20093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB2728A1188A8BB20093CCC9 /* avassert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB2728A2188A8BB20093CCC9 /* avconfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB2728A3188A8BB20093CCC9 /* avstring.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
avstring.h; sourceTree = ""; }; + FB2728A4188A8BB20093CCC9 /* avutil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB2728A5188A8BB20093CCC9 /* base64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB2728A6188A8BB20093CCC9 /* blowfish.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB2728A7188A8BB20093CCC9 /* bprint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB2728A8188A8BB20093CCC9 /* bswap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB2728A9188A8BB20093CCC9 /* buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB2728AA188A8BB20093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB2728AB188A8BB20093CCC9 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB2728AC188A8BB20093CCC9 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB2728AD188A8BB20093CCC9 /* crc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB2728AE188A8BB20093CCC9 /* dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB2728AF188A8BB20093CCC9 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB2728B0188A8BB20093CCC9 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB2728B1188A8BB20093CCC9 /* fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB2728B2188A8BB20093CCC9 /* file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB2728B3188A8BB20093CCC9 /* frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB2728B4188A8BB20093CCC9 /* hmac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB2728B5188A8BB20093CCC9 /* imgutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB2728B6188A8BB20093CCC9 /* intfloat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB2728B7188A8BB20093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB2728B8188A8BB20093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB2728B9188A8BB20093CCC9 /* lfg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lfg.h; 
sourceTree = ""; }; + FB2728BA188A8BB20093CCC9 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB2728BB188A8BB20093CCC9 /* lzo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB2728BC188A8BB20093CCC9 /* mathematics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB2728BD188A8BB20093CCC9 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB2728BE188A8BB20093CCC9 /* mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB2728BF188A8BB20093CCC9 /* murmur3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB2728C0188A8BB20093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB2728C1188A8BB20093CCC9 /* opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB2728C2188A8BB20093CCC9 /* parseutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB2728C3188A8BB20093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB2728C4188A8BB20093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB2728C5188A8BB20093CCC9 /* random_seed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB2728C6188A8BB20093CCC9 /* rational.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB2728C7188A8BB20093CCC9 /* ripemd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB2728C8188A8BB20093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB2728C9188A8BB20093CCC9 /* sha.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB2728CA188A8BB20093CCC9 /* sha512.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB2728CB188A8BB20093CCC9 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB2728CC188A8BB20093CCC9 /* timecode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB2728CD188A8BB20093CCC9 /* timestamp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB2728CE188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2728CF188A8BB20093CCC9 /* xtea.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = 
""; }; + FB2728D1188A8BB20093CCC9 /* swresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB2728D2188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2728D4188A8BB20093CCC9 /* swscale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB2728D5188A8BB20093CCC9 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2728D7188A8BB20093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB2728D8188A8BB20093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB2728D9188A8BB20093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB2728DA188A8BB20093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB2728DB188A8BB20093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB2728DC188A8BB20093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB2728DD188A8BB20093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB2728DE188A8BB20093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB272918188A93360093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272919188A93360093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB27291B188A93360093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB27291E188A93360093CCC9 /* Icon-40.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-40.png"; sourceTree = ""; }; + FB27291F188A93360093CCC9 /* Icon-40@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-40@2x.png"; sourceTree = ""; }; + FB272920188A93360093CCC9 /* Icon-60.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-60.png"; sourceTree = ""; }; + FB272921188A93360093CCC9 /* Icon-60@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-60@2x.png"; sourceTree = ""; }; + FB272922188A93360093CCC9 /* Icon-72.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-72.png"; sourceTree = ""; }; + FB272923188A93360093CCC9 /* Icon-72@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-72@2x.png"; sourceTree = ""; }; + FB272924188A93360093CCC9 /* Icon-76.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-76.png"; sourceTree = ""; }; + FB272925188A93360093CCC9 /* Icon-76@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-76@2x.png"; sourceTree = ""; }; + FB272926188A93360093CCC9 /* Icon-Small-50.png 
*/ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-Small-50.png"; sourceTree = ""; }; + FB272927188A93360093CCC9 /* Icon-Small-50@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-Small-50@2x.png"; sourceTree = ""; }; + FB272928188A93360093CCC9 /* Icon-Small.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-Small.png"; sourceTree = ""; }; + FB272929188A93360093CCC9 /* Icon-Small@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon-Small@2x.png"; sourceTree = ""; }; + FB27292A188A93360093CCC9 /* Icon.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = Icon.png; sourceTree = ""; }; + FB27292B188A93360093CCC9 /* Icon@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "Icon@2x.png"; sourceTree = ""; }; + FB27292C188A93360093CCC9 /* iTunesArtwork.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = iTunesArtwork.png; sourceTree = ""; }; + FB27292D188A93360093CCC9 /* iTunesArtwork@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = "iTunesArtwork@2x.png"; sourceTree = ""; }; + FB27292E188A93360093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272933188A93360093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272934188A93360093CCC9 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272935188A93360093CCC9 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272936188A93360093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272937188A93360093CCC9 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB272938188A93360093CCC9 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB272939188A93360093CCC9 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB27293A188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27293B188A93360093CCC9 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB27293D188A93360093CCC9 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB27293E188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272940188A93360093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB272941188A93360093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272942188A93360093CCC9 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB272943188A93360093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272944188A93360093CCC9 /* buffersink.h */ 
= {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB272945188A93360093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272946188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272948188A93360093CCC9 /* avformat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB272949188A93360093CCC9 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB27294A188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27294C188A93360093CCC9 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB27294D188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27294F188A93360093CCC9 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB272950188A93360093CCC9 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB272951188A93360093CCC9 /* attributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB272952188A93360093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB272953188A93360093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB272954188A93360093CCC9 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB272955188A93360093CCC9 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB272956188A93360093CCC9 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB272957188A93360093CCC9 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB272958188A93360093CCC9 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB272959188A93360093CCC9 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB27295A188A93360093CCC9 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB27295B188A93360093CCC9 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB27295C188A93360093CCC9 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB27295D188A93360093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB27295E188A93360093CCC9 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB27295F188A93360093CCC9 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = 
cpu.h; sourceTree = ""; }; + FB272960188A93360093CCC9 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB272961188A93360093CCC9 /* dict.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB272962188A93360093CCC9 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB272963188A93360093CCC9 /* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB272964188A93360093CCC9 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB272965188A93360093CCC9 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB272966188A93360093CCC9 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB272967188A93360093CCC9 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB272968188A93360093CCC9 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB272969188A93360093CCC9 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB27296A188A93360093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB27296B188A93360093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB27296C188A93360093CCC9 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB27296D188A93360093CCC9 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB27296E188A93360093CCC9 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB27296F188A93360093CCC9 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB272970188A93360093CCC9 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB272971188A93360093CCC9 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB272972188A93360093CCC9 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB272973188A93360093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB272974188A93360093CCC9 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB272975188A93360093CCC9 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB272976188A93360093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB272977188A93360093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB272978188A93360093CCC9 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB272979188A93360093CCC9 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB27297A188A93360093CCC9 /* ripemd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB27297B188A93360093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB27297C188A93360093CCC9 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB27297D188A93360093CCC9 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB27297E188A93360093CCC9 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB27297F188A93360093CCC9 /* timecode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB272980188A93360093CCC9 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB272981188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272982188A93360093CCC9 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB272984188A93360093CCC9 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB272985188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272987188A93360093CCC9 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB272988188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB27298A188A93360093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB27298B188A93360093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB27298C188A93360093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB27298D188A93360093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB27298E188A93360093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB27298F188A93360093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB272990188A93360093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB272991188A93360093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB272993188A93360093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB272994188A93360093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + 
FB272995188A93360093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB272996188A93360093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB272997188A93360093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB272998188A93360093CCC9 /* libavutil.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB272999188A93360093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB27299A188A93360093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB27299E188A93360093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB27299F188A93360093CCC9 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB2729A0188A93360093CCC9 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB2729A1188A93360093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB2729A2188A93360093CCC9 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB2729A3188A93360093CCC9 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB2729A4188A93360093CCC9 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB2729A5188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729A6188A93360093CCC9 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB2729A8188A93360093CCC9 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB2729A9188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729AB188A93360093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB2729AC188A93360093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB2729AD188A93360093CCC9 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB2729AE188A93360093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB2729AF188A93360093CCC9 /* buffersink.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB2729B0188A93360093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB2729B1188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729B3188A93360093CCC9 /* avformat.h */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB2729B4188A93360093CCC9 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB2729B5188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729B7188A93360093CCC9 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB2729B8188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729BA188A93360093CCC9 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB2729BB188A93360093CCC9 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB2729BC188A93360093CCC9 /* attributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB2729BD188A93360093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB2729BE188A93360093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB2729BF188A93360093CCC9 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB2729C0188A93360093CCC9 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB2729C1188A93360093CCC9 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB2729C2188A93360093CCC9 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB2729C3188A93360093CCC9 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB2729C4188A93360093CCC9 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB2729C5188A93360093CCC9 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB2729C6188A93360093CCC9 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB2729C7188A93360093CCC9 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB2729C8188A93360093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB2729C9188A93360093CCC9 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB2729CA188A93360093CCC9 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB2729CB188A93360093CCC9 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB2729CC188A93360093CCC9 /* dict.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB2729CD188A93360093CCC9 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB2729CE188A93360093CCC9 
/* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB2729CF188A93360093CCC9 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB2729D0188A93360093CCC9 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB2729D1188A93360093CCC9 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB2729D2188A93360093CCC9 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB2729D3188A93360093CCC9 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB2729D4188A93360093CCC9 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB2729D5188A93360093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB2729D6188A93360093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB2729D7188A93360093CCC9 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB2729D8188A93360093CCC9 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB2729D9188A93360093CCC9 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB2729DA188A93360093CCC9 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB2729DB188A93360093CCC9 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB2729DC188A93360093CCC9 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB2729DD188A93360093CCC9 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB2729DE188A93360093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB2729DF188A93360093CCC9 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB2729E0188A93360093CCC9 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB2729E1188A93360093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB2729E2188A93360093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB2729E3188A93360093CCC9 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB2729E4188A93360093CCC9 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB2729E5188A93360093CCC9 /* ripemd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB2729E6188A93360093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; 
sourceTree = ""; }; + FB2729E7188A93360093CCC9 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB2729E8188A93360093CCC9 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB2729E9188A93360093CCC9 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB2729EA188A93360093CCC9 /* timecode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB2729EB188A93360093CCC9 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB2729EC188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729ED188A93360093CCC9 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB2729EF188A93360093CCC9 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB2729F0188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729F2188A93360093CCC9 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB2729F3188A93360093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB2729F5188A93370093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB2729F6188A93370093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB2729F7188A93370093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB2729F8188A93370093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB2729F9188A93370093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB2729FA188A93370093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB2729FB188A93370093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB2729FC188A93370093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB2729FE188A93370093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB2729FF188A93370093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FB272A00188A93370093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB272A01188A93370093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB272A02188A93370093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB272A03188A93370093CCC9 /* libavutil.pc */ = {isa = 
PBXFileReference; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB272A04188A93370093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB272A05188A93370093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB272A09188A93370093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272A0A188A93370093CCC9 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272A0B188A93370093CCC9 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272A0C188A93370093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272A0D188A93370093CCC9 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB272A0E188A93370093CCC9 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB272A0F188A93370093CCC9 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB272A10188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A11188A93370093CCC9 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB272A13188A93370093CCC9 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB272A14188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A16188A93370093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB272A17188A93370093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272A18188A93370093CCC9 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB272A19188A93370093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272A1A188A93370093CCC9 /* buffersink.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB272A1B188A93370093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272A1C188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A1E188A93370093CCC9 /* avformat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB272A1F188A93370093CCC9 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB272A20188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A22188A93370093CCC9 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree 
= ""; }; + FB272A23188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A25188A93370093CCC9 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB272A26188A93370093CCC9 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB272A27188A93370093CCC9 /* attributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB272A28188A93370093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB272A29188A93370093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB272A2A188A93370093CCC9 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB272A2B188A93370093CCC9 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB272A2C188A93370093CCC9 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB272A2D188A93370093CCC9 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB272A2E188A93370093CCC9 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB272A2F188A93370093CCC9 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB272A30188A93370093CCC9 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB272A31188A93370093CCC9 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB272A32188A93370093CCC9 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB272A33188A93370093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB272A34188A93370093CCC9 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB272A35188A93370093CCC9 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB272A36188A93370093CCC9 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB272A37188A93370093CCC9 /* dict.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB272A38188A93370093CCC9 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB272A39188A93370093CCC9 /* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB272A3A188A93370093CCC9 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB272A3B188A93370093CCC9 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB272A3C188A93370093CCC9 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = 
frame.h; sourceTree = ""; }; + FB272A3D188A93370093CCC9 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB272A3E188A93370093CCC9 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB272A3F188A93370093CCC9 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB272A40188A93370093CCC9 /* intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB272A41188A93370093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB272A42188A93370093CCC9 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB272A43188A93370093CCC9 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB272A44188A93370093CCC9 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB272A45188A93370093CCC9 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB272A46188A93370093CCC9 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB272A47188A93370093CCC9 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB272A48188A93370093CCC9 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB272A49188A93370093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB272A4A188A93370093CCC9 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB272A4B188A93370093CCC9 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB272A4C188A93370093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB272A4D188A93370093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB272A4E188A93370093CCC9 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB272A4F188A93370093CCC9 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB272A50188A93370093CCC9 /* ripemd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB272A51188A93370093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB272A52188A93370093CCC9 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB272A53188A93370093CCC9 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB272A54188A93370093CCC9 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB272A55188A93370093CCC9 /* timecode.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB272A56188A93370093CCC9 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB272A57188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A58188A93370093CCC9 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FB272A5A188A93370093CCC9 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB272A5B188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A5D188A93370093CCC9 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB272A5E188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A60188A93370093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB272A61188A93370093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB272A62188A93370093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB272A63188A93370093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB272A64188A93370093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB272A65188A93370093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB272A66188A93370093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB272A67188A93370093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB272A69188A93370093CCC9 /* libavcodec.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FB272A6A188A93370093CCC9 /* libavdevice.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FB272A6B188A93370093CCC9 /* libavfilter.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FB272A6C188A93370093CCC9 /* libavformat.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FB272A6D188A93370093CCC9 /* libavresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FB272A6E188A93370093CCC9 /* libavutil.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FB272A6F188A93370093CCC9 /* libswresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FB272A70188A93370093CCC9 /* libswscale.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FB272A74188A93370093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = 
avcodec.h; sourceTree = ""; }; + FB272A75188A93370093CCC9 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FB272A76188A93370093CCC9 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FB272A77188A93370093CCC9 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FB272A78188A93370093CCC9 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FB272A79188A93370093CCC9 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FB272A7A188A93370093CCC9 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FB272A7B188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A7C188A93370093CCC9 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FB272A7E188A93370093CCC9 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FB272A7F188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A81188A93370093CCC9 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FB272A82188A93370093CCC9 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FB272A83188A93370093CCC9 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FB272A84188A93370093CCC9 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FB272A85188A93370093CCC9 /* buffersink.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FB272A86188A93370093CCC9 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FB272A87188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A89188A93370093CCC9 /* avformat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FB272A8A188A93370093CCC9 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FB272A8B188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A8D188A93370093CCC9 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FB272A8E188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272A90188A93370093CCC9 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FB272A91188A93370093CCC9 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FB272A92188A93370093CCC9 /* attributes.h */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FB272A93188A93370093CCC9 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FB272A94188A93370093CCC9 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FB272A95188A93370093CCC9 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FB272A96188A93370093CCC9 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FB272A97188A93370093CCC9 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FB272A98188A93370093CCC9 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FB272A99188A93370093CCC9 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FB272A9A188A93370093CCC9 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FB272A9B188A93370093CCC9 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FB272A9C188A93370093CCC9 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FB272A9D188A93370093CCC9 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FB272A9E188A93370093CCC9 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FB272A9F188A93370093CCC9 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FB272AA0188A93370093CCC9 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FB272AA1188A93370093CCC9 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FB272AA2188A93370093CCC9 /* dict.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FB272AA3188A93370093CCC9 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FB272AA4188A93370093CCC9 /* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FB272AA5188A93370093CCC9 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FB272AA6188A93370093CCC9 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FB272AA7188A93370093CCC9 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FB272AA8188A93370093CCC9 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FB272AA9188A93370093CCC9 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FB272AAA188A93370093CCC9 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FB272AAB188A93370093CCC9 /* 
intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FB272AAC188A93370093CCC9 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FB272AAD188A93370093CCC9 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FB272AAE188A93370093CCC9 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FB272AAF188A93370093CCC9 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FB272AB0188A93370093CCC9 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FB272AB1188A93370093CCC9 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FB272AB2188A93370093CCC9 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FB272AB3188A93370093CCC9 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FB272AB4188A93370093CCC9 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FB272AB5188A93370093CCC9 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FB272AB6188A93370093CCC9 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FB272AB7188A93370093CCC9 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FB272AB8188A93370093CCC9 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FB272AB9188A93370093CCC9 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FB272ABA188A93370093CCC9 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FB272ABB188A93370093CCC9 /* ripemd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FB272ABC188A93370093CCC9 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FB272ABD188A93370093CCC9 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FB272ABE188A93370093CCC9 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FB272ABF188A93370093CCC9 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FB272AC0188A93370093CCC9 /* timecode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FB272AC1188A93370093CCC9 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FB272AC2188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272AC3188A93370093CCC9 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; 
sourceTree = ""; }; + FB272AC5188A93370093CCC9 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FB272AC6188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272AC8188A93370093CCC9 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FB272AC9188A93370093CCC9 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FB272ACB188A93370093CCC9 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FB272ACC188A93370093CCC9 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FB272ACD188A93370093CCC9 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FB272ACE188A93370093CCC9 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FB272ACF188A93370093CCC9 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FB272AD0188A93370093CCC9 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FB272AD1188A93370093CCC9 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FB272AD2188A93370093CCC9 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FB272AD5188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272AD6188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272AD8188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272AD9188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272ADC188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272ADD188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272ADF188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272AE0188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272AE3188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272AE4188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272AE6188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + 
FB272AE7188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272AEA188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272AEB188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272AED188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272AEE188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272AF1188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272AF2188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272AF4188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272AF5188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272AF8188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272AF9188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272AFB188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272AFC188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272AFF188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B00188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B02188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B03188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B06188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B07188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B09188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B0A188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B0D188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B0E188A93370093CCC9 /* AppDelegate.m */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B10188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B11188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B14188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B15188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B17188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B18188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B1B188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B1C188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B1E188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B1F188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B22188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B23188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B25188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B26188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B29188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B2A188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B2C188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B2D188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B30188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B31188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B33188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B34188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = 
folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B37188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B38188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B3A188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B3B188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B3E188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B3F188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B41188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B42188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B45188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B46188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B48188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B49188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B4C188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B4D188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B4F188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B50188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B53188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B54188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B56188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B57188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B5A188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B5B188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; 
}; + FB272B5D188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B5E188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B61188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B62188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B64188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B65188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B68188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B69188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B6B188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B6C188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B6F188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B70188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B72188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B73188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B76188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B77188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B79188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B7A188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B7D188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B7E188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B80188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B81188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B84188A93370093CCC9 /* 
AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B85188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B87188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B88188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B8B188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B8C188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B8E188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B8F188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B92188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B93188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B95188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B96188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272B99188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272B9A188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272B9C188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272B9D188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272BA0188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272BA1188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272BA3188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272BA4188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272BA7188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272BA8188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272BAA188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = 
text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272BAB188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272BAE188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272BAF188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272BB1188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272BB2188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272BB5188A93370093CCC9 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FB272BB6188A93370093CCC9 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FB272BB8188A93370093CCC9 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FB272BB9188A93370093CCC9 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FB272BBA188A93370093CCC9 /* Limelight-iOS-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Limelight-iOS-Info.plist"; sourceTree = ""; }; + FB272BBB188A93370093CCC9 /* Limelight-iOS-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "Limelight-iOS-Prefix.pch"; sourceTree = ""; }; + FB272BBD188A93370093CCC9 /* Limelight_iOS.xcdatamodel */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcdatamodel; path = Limelight_iOS.xcdatamodel; sourceTree = ""; }; + FB272BBE188A93370093CCC9 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + FB272BBF188A93370093CCC9 /* MainFrameViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MainFrameViewController.h; sourceTree = ""; }; + FB272BC0188A93370093CCC9 /* MainFrameViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = MainFrameViewController.m; sourceTree = ""; }; + FB272BC1188A93370093CCC9 /* notpadded.h264 */ = {isa = PBXFileReference; lastKnownFileType = file; path = notpadded.h264; sourceTree = ""; }; + FB272BC3188A93370093CCC9 /* VideoDepacketizer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = VideoDepacketizer.h; sourceTree = ""; }; + FB272BC4188A93370093CCC9 /* VideoDepacketizer.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = VideoDepacketizer.m; sourceTree = ""; }; + FB272BC5188A93370093CCC9 /* VideoDecoder.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = VideoDecoder.h; sourceTree = ""; }; + FB272BC6188A93370093CCC9 /* VideoDecoder.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = VideoDecoder.m; sourceTree = ""; }; + FB4E945C188BB6FA00CCC583 /* liblimelight-common.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = "liblimelight-common.a"; path = 
"../../../Library/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos/liblimelight-common.a"; sourceTree = ""; }; + FB4E945E188BB80700CCC583 /* Connection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Connection.h; sourceTree = ""; }; + FB4E945F188BB80700CCC583 /* Connection.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Connection.m; sourceTree = ""; }; + FB4E9461188BC09600CCC583 /* liblimelight-common.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = "liblimelight-common.a"; path = "../../../Library/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Intermediates/limelight-common-c.build/Debug-iphoneos/limelight-common.build/Objects-normal/armv7/liblimelight-common.a"; sourceTree = ""; }; + FB4E9486188BC39600CCC583 /* liblimelight-common.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = "liblimelight-common.a"; sourceTree = ""; }; + FB4E9487188BC39600CCC583 /* liblimelight-common.a.dSYM */ = {isa = PBXFileReference; lastKnownFileType = wrapper.dsym; path = "liblimelight-common.a.dSYM"; sourceTree = ""; }; + FB4E9489188BCC2900CCC583 /* VideoRenderer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VideoRenderer.h; sourceTree = ""; }; + FB4E948A188BCC2900CCC583 /* VideoRenderer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = VideoRenderer.m; sourceTree = ""; }; + FBBC52C9188ACA92004D2BA0 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FBBC52CA188ACA92004D2BA0 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FBBC52CB188ACA92004D2BA0 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FBBC52CC188ACA92004D2BA0 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FBBC52CD188ACA92004D2BA0 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FBBC52CE188ACA92004D2BA0 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FBBC52CF188ACA92004D2BA0 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FBBC52D0188ACA92004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC52D1188ACA92004D2BA0 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FBBC52D3188ACA92004D2BA0 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FBBC52D4188ACA92004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC52D6188ACA92004D2BA0 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FBBC52D7188ACA92004D2BA0 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FBBC52D8188ACA92004D2BA0 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FBBC52D9188ACA92004D2BA0 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FBBC52DA188ACA92004D2BA0 /* buffersink.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FBBC52DB188ACA92004D2BA0 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = ""; }; + FBBC52DC188ACA92004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC52DE188ACA92004D2BA0 /* avformat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FBBC52DF188ACA92004D2BA0 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FBBC52E0188ACA92004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC52E2188ACA92004D2BA0 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FBBC52E3188ACA92004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC52E5188ACA92004D2BA0 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FBBC52E6188ACA92004D2BA0 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FBBC52E7188ACA92004D2BA0 /* attributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FBBC52E8188ACA92004D2BA0 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FBBC52E9188ACA92004D2BA0 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FBBC52EA188ACA92004D2BA0 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FBBC52EB188ACA92004D2BA0 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FBBC52EC188ACA92004D2BA0 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FBBC52ED188ACA92004D2BA0 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FBBC52EE188ACA93004D2BA0 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FBBC52EF188ACA93004D2BA0 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FBBC52F0188ACA93004D2BA0 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FBBC52F1188ACA93004D2BA0 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FBBC52F2188ACA93004D2BA0 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FBBC52F3188ACA93004D2BA0 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = 
""; }; + FBBC52F4188ACA93004D2BA0 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FBBC52F5188ACA93004D2BA0 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FBBC52F6188ACA93004D2BA0 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FBBC52F7188ACA93004D2BA0 /* dict.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FBBC52F8188ACA93004D2BA0 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FBBC52F9188ACA93004D2BA0 /* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FBBC52FA188ACA93004D2BA0 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FBBC52FB188ACA93004D2BA0 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FBBC52FC188ACA93004D2BA0 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FBBC52FD188ACA93004D2BA0 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FBBC52FE188ACA93004D2BA0 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FBBC52FF188ACA93004D2BA0 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FBBC5300188ACA93004D2BA0 /* intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FBBC5301188ACA93004D2BA0 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FBBC5302188ACA93004D2BA0 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FBBC5303188ACA93004D2BA0 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FBBC5304188ACA93004D2BA0 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FBBC5305188ACA93004D2BA0 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FBBC5306188ACA93004D2BA0 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FBBC5307188ACA93004D2BA0 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FBBC5308188ACA93004D2BA0 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FBBC5309188ACA93004D2BA0 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FBBC530A188ACA93004D2BA0 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FBBC530B188ACA93004D2BA0 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FBBC530C188ACA93004D2BA0 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree 
= ""; }; + FBBC530D188ACA93004D2BA0 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FBBC530E188ACA93004D2BA0 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FBBC530F188ACA93004D2BA0 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FBBC5310188ACA93004D2BA0 /* ripemd.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FBBC5311188ACA93004D2BA0 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FBBC5312188ACA93004D2BA0 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FBBC5313188ACA93004D2BA0 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FBBC5314188ACA93004D2BA0 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FBBC5315188ACA93004D2BA0 /* timecode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FBBC5316188ACA93004D2BA0 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FBBC5317188ACA93004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5318188ACA93004D2BA0 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FBBC531A188ACA93004D2BA0 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FBBC531B188ACA93004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC531D188ACA93004D2BA0 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FBBC531E188ACA93004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5320188ACA93004D2BA0 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FBBC5321188ACA93004D2BA0 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FBBC5322188ACA93004D2BA0 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FBBC5323188ACA93004D2BA0 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FBBC5324188ACA93004D2BA0 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FBBC5325188ACA93004D2BA0 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FBBC5326188ACA93004D2BA0 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FBBC5327188ACA93004D2BA0 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FBBC5329188ACA93004D2BA0 /* libavcodec.pc */ = {isa = 
PBXFileReference; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FBBC532A188ACA93004D2BA0 /* libavdevice.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FBBC532B188ACA93004D2BA0 /* libavfilter.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FBBC532C188ACA93004D2BA0 /* libavformat.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavformat.pc; sourceTree = ""; }; + FBBC532D188ACA93004D2BA0 /* libavresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FBBC532E188ACA93004D2BA0 /* libavutil.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FBBC532F188ACA93004D2BA0 /* libswresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FBBC5330188ACA93004D2BA0 /* libswscale.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FBBC5334188ACA99004D2BA0 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FBBC5335188ACA99004D2BA0 /* avfft.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = ""; }; + FBBC5336188ACA99004D2BA0 /* dxva2.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = ""; }; + FBBC5337188ACA99004D2BA0 /* old_codec_ids.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = ""; }; + FBBC5338188ACA99004D2BA0 /* vaapi.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = ""; }; + FBBC5339188ACA99004D2BA0 /* vda.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = ""; }; + FBBC533A188ACA99004D2BA0 /* vdpau.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = ""; }; + FBBC533B188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC533C188ACA99004D2BA0 /* xvmc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = ""; }; + FBBC533E188ACA99004D2BA0 /* avdevice.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = ""; }; + FBBC533F188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5341188ACA99004D2BA0 /* asrc_abuffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = ""; }; + FBBC5342188ACA99004D2BA0 /* avcodec.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = ""; }; + FBBC5343188ACA99004D2BA0 /* avfilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = ""; }; + FBBC5344188ACA99004D2BA0 /* avfiltergraph.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = ""; }; + FBBC5345188ACA99004D2BA0 /* buffersink.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = ""; }; + FBBC5346188ACA99004D2BA0 /* buffersrc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = 
""; }; + FBBC5347188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5349188ACA99004D2BA0 /* avformat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = ""; }; + FBBC534A188ACA99004D2BA0 /* avio.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = ""; }; + FBBC534B188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC534D188ACA99004D2BA0 /* avresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = ""; }; + FBBC534E188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5350188ACA99004D2BA0 /* adler32.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = ""; }; + FBBC5351188ACA99004D2BA0 /* aes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = ""; }; + FBBC5352188ACA99004D2BA0 /* attributes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = ""; }; + FBBC5353188ACA99004D2BA0 /* audio_fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = ""; }; + FBBC5354188ACA99004D2BA0 /* audioconvert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = ""; }; + FBBC5355188ACA99004D2BA0 /* avassert.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = ""; }; + FBBC5356188ACA99004D2BA0 /* avconfig.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = ""; }; + FBBC5357188ACA99004D2BA0 /* avstring.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avstring.h; sourceTree = ""; }; + FBBC5358188ACA99004D2BA0 /* avutil.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = ""; }; + FBBC5359188ACA99004D2BA0 /* base64.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = ""; }; + FBBC535A188ACA99004D2BA0 /* blowfish.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = ""; }; + FBBC535B188ACA99004D2BA0 /* bprint.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = ""; }; + FBBC535C188ACA99004D2BA0 /* bswap.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = ""; }; + FBBC535D188ACA99004D2BA0 /* buffer.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = buffer.h; sourceTree = ""; }; + FBBC535E188ACA99004D2BA0 /* channel_layout.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = ""; }; + FBBC535F188ACA99004D2BA0 /* common.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = ""; }; + FBBC5360188ACA99004D2BA0 /* cpu.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = ""; }; + FBBC5361188ACA99004D2BA0 /* crc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = ""; }; + FBBC5362188ACA99004D2BA0 /* dict.h */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = ""; }; + FBBC5363188ACA99004D2BA0 /* error.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = ""; }; + FBBC5364188ACA99004D2BA0 /* eval.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = ""; }; + FBBC5365188ACA99004D2BA0 /* fifo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = ""; }; + FBBC5366188ACA99004D2BA0 /* file.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = ""; }; + FBBC5367188ACA99004D2BA0 /* frame.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = frame.h; sourceTree = ""; }; + FBBC5368188ACA99004D2BA0 /* hmac.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = ""; }; + FBBC5369188ACA99004D2BA0 /* imgutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = ""; }; + FBBC536A188ACA99004D2BA0 /* intfloat.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = ""; }; + FBBC536B188ACA99004D2BA0 /* intfloat_readwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = ""; }; + FBBC536C188ACA99004D2BA0 /* intreadwrite.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = ""; }; + FBBC536D188ACA99004D2BA0 /* lfg.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = ""; }; + FBBC536E188ACA99004D2BA0 /* log.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = ""; }; + FBBC536F188ACA99004D2BA0 /* lzo.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = ""; }; + FBBC5370188ACA99004D2BA0 /* mathematics.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mathematics.h; sourceTree = ""; }; + FBBC5371188ACA99004D2BA0 /* md5.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = ""; }; + FBBC5372188ACA99004D2BA0 /* mem.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = ""; }; + FBBC5373188ACA99004D2BA0 /* murmur3.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = murmur3.h; sourceTree = ""; }; + FBBC5374188ACA99004D2BA0 /* old_pix_fmts.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = ""; }; + FBBC5375188ACA99004D2BA0 /* opt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = ""; }; + FBBC5376188ACA99004D2BA0 /* parseutils.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = ""; }; + FBBC5377188ACA99004D2BA0 /* pixdesc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = ""; }; + FBBC5378188ACA99004D2BA0 /* pixfmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = ""; }; + FBBC5379188ACA99004D2BA0 /* random_seed.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = ""; }; + FBBC537A188ACA99004D2BA0 /* rational.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = ""; }; + FBBC537B188ACA99004D2BA0 /* ripemd.h 
*/ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ripemd.h; sourceTree = ""; }; + FBBC537C188ACA99004D2BA0 /* samplefmt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = ""; }; + FBBC537D188ACA99004D2BA0 /* sha.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = ""; }; + FBBC537E188ACA99004D2BA0 /* sha512.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = sha512.h; sourceTree = ""; }; + FBBC537F188ACA99004D2BA0 /* time.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FBBC5380188ACA99004D2BA0 /* timecode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = ""; }; + FBBC5381188ACA99004D2BA0 /* timestamp.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = ""; }; + FBBC5382188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5383188ACA99004D2BA0 /* xtea.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = ""; }; + FBBC5385188ACA99004D2BA0 /* swresample.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = ""; }; + FBBC5386188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC5388188ACA99004D2BA0 /* swscale.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = ""; }; + FBBC5389188ACA99004D2BA0 /* version.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = ""; }; + FBBC538B188ACA99004D2BA0 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = ""; }; + FBBC538C188ACA99004D2BA0 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = ""; }; + FBBC538D188ACA99004D2BA0 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = ""; }; + FBBC538E188ACA99004D2BA0 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = ""; }; + FBBC538F188ACA99004D2BA0 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = ""; }; + FBBC5390188ACA99004D2BA0 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = ""; }; + FBBC5391188ACA99004D2BA0 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = ""; }; + FBBC5392188ACA99004D2BA0 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = ""; }; + FBBC5394188ACA99004D2BA0 /* libavcodec.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavcodec.pc; sourceTree = ""; }; + FBBC5395188ACA99004D2BA0 /* libavdevice.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavdevice.pc; sourceTree = ""; }; + FBBC5396188ACA99004D2BA0 /* libavfilter.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavfilter.pc; sourceTree = ""; }; + FBBC5397188ACA99004D2BA0 /* libavformat.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = 
libavformat.pc; sourceTree = ""; }; + FBBC5398188ACA99004D2BA0 /* libavresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavresample.pc; sourceTree = ""; }; + FBBC5399188ACA99004D2BA0 /* libavutil.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libavutil.pc; sourceTree = ""; }; + FBBC539A188ACA99004D2BA0 /* libswresample.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswresample.pc; sourceTree = ""; }; + FBBC539B188ACA99004D2BA0 /* libswscale.pc */ = {isa = PBXFileReference; lastKnownFileType = text; path = libswscale.pc; sourceTree = ""; }; + FBBC53A4188ACD01004D2BA0 /* libz.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.dylib; path = usr/lib/libz.dylib; sourceTree = SDKROOT; }; + FBBC53A6188AD1D3004D2BA0 /* StreamFrameViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StreamFrameViewController.h; sourceTree = ""; }; + FBBC53A7188AD1D3004D2BA0 /* StreamFrameViewController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = StreamFrameViewController.m; sourceTree = ""; }; + FBBC53AC188AE27D004D2BA0 /* StreamView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StreamView.h; sourceTree = ""; }; + FBBC53AD188AE27D004D2BA0 /* StreamView.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = StreamView.m; sourceTree = ""; }; + FBBC53CE188BA7C7004D2BA0 /* opus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus.h; sourceTree = ""; }; + FBBC53CF188BA7C7004D2BA0 /* opus_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_defines.h; sourceTree = ""; }; + FBBC53D0188BA7C7004D2BA0 /* opus_multistream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_multistream.h; sourceTree = ""; }; + FBBC53D1188BA7C7004D2BA0 /* opus_types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_types.h; sourceTree = ""; }; + FBBC53D3188BA7C7004D2BA0 /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libopus.a; sourceTree = ""; }; + FBBC53D4188BA7C7004D2BA0 /* libopus.la */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libopus.la; sourceTree = ""; }; + FBBC53D6188BA7C7004D2BA0 /* opus.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = opus.pc; sourceTree = ""; }; + FBBC53DA188BA7C7004D2BA0 /* opus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus.h; sourceTree = ""; }; + FBBC53DB188BA7C7004D2BA0 /* opus_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_defines.h; sourceTree = ""; }; + FBBC53DC188BA7C7004D2BA0 /* opus_multistream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_multistream.h; sourceTree = ""; }; + FBBC53DD188BA7C7004D2BA0 /* opus_types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opus_types.h; sourceTree = ""; }; + FBBC53DF188BA7C7004D2BA0 /* libopus.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libopus.a; sourceTree = ""; }; + FBBC53E0188BA7C7004D2BA0 /* libopus.la */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = text; path = libopus.la; sourceTree = ""; }; + FBBC53E2188BA7C7004D2BA0 /* opus.pc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = opus.pc; sourceTree = ""; }; + FBBC53E9188BA7FC004D2BA0 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavcodec.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavcodec.a"; sourceTree = ""; }; + FBBC53EA188BA7FC004D2BA0 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavdevice.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavdevice.a"; sourceTree = ""; }; + FBBC53EB188BA7FC004D2BA0 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavfilter.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavfilter.a"; sourceTree = ""; }; + FBBC53EC188BA7FC004D2BA0 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavformat.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavformat.a"; sourceTree = ""; }; + FBBC53ED188BA7FC004D2BA0 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavresample.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavresample.a"; sourceTree = ""; }; + FBBC53EE188BA7FC004D2BA0 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavutil.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libavutil.a"; sourceTree = ""; }; + FBBC53EF188BA7FC004D2BA0 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libswresample.a"; sourceTree = ""; }; + FBBC53F0188BA7FC004D2BA0 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswscale.a; path = "Limelight-iOS/libs/ffmpeg/armv7/lib/libswscale.a"; sourceTree = ""; }; + FBC18AE2188A2AB500D5D34E /* MainFrame.storyboard */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.storyboard; name = MainFrame.storyboard; path = ../MainFrame.storyboard; sourceTree = ""; }; + FBC18B29188A3B9100D5D34E /* MainFrameViewController.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MainFrameViewController.h; sourceTree = ""; }; + FBC18B2A188A3B9100D5D34E /* MainFrameViewController.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = MainFrameViewController.m; sourceTree = ""; }; + FBC18B2D188A4E0500D5D34E /* VideoDepacketizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = VideoDepacketizer.h; path = Video/VideoDepacketizer.h; sourceTree = ""; }; + FBC18B2E188A4E0500D5D34E /* VideoDepacketizer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = VideoDepacketizer.m; path = Video/VideoDepacketizer.m; sourceTree = ""; }; + FBC18B30188A5A1100D5D34E /* VideoDecoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VideoDecoder.h; sourceTree = ""; }; + FBC18B31188A5A1100D5D34E /* VideoDecoder.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = VideoDecoder.m; sourceTree = ""; }; + FBF6AE71188A274100B50578 /* Limelight-iOS.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = "Limelight-iOS.app"; sourceTree = BUILT_PRODUCTS_DIR; }; + FBF6AE74188A274100B50578 /* Foundation.framework */ = {isa = 
PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; }; + FBF6AE76188A274100B50578 /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; }; + FBF6AE78188A274100B50578 /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; }; + FBF6AE7A188A274100B50578 /* CoreData.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreData.framework; path = System/Library/Frameworks/CoreData.framework; sourceTree = SDKROOT; }; + FBF6AE7E188A274100B50578 /* Limelight-iOS-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Limelight-iOS-Info.plist"; sourceTree = ""; }; + FBF6AE80188A274100B50578 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FBF6AE82188A274100B50578 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + FBF6AE84188A274100B50578 /* Limelight-iOS-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "Limelight-iOS-Prefix.pch"; sourceTree = ""; }; + FBF6AE85188A274100B50578 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; + FBF6AE86188A274100B50578 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; + FBF6AE89188A274100B50578 /* Limelight_iOS.xcdatamodel */ = {isa = PBXFileReference; lastKnownFileType = wrapper.xcdatamodel; path = Limelight_iOS.xcdatamodel; sourceTree = ""; }; + FBF6AE8B188A274100B50578 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + FBF6AE91188A274100B50578 /* Limelight-iOSTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = "Limelight-iOSTests.xctest"; sourceTree = BUILT_PRODUCTS_DIR; }; + FBF6AE92188A274100B50578 /* XCTest.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = XCTest.framework; path = Library/Frameworks/XCTest.framework; sourceTree = DEVELOPER_DIR; }; + FBF6AE9B188A274100B50578 /* Limelight-iOSTests-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Limelight-iOSTests-Info.plist"; sourceTree = ""; }; + FBF6AE9D188A274100B50578 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = ""; }; + FBF6AE9F188A274100B50578 /* Limelight_iOSTests.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Limelight_iOSTests.m; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + FBF6AE6E188A274100B50578 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + FB05E4C51899EC7F00E0CAC7 /* libxml2.dylib in Frameworks */, + FB05E4C01899730B00E0CAC7 /* AdSupport.framework in Frameworks */, + FBBC0240188E13A800CEB8E5 /* liblimelight-common.a in Frameworks */, + FBBC53A5188ACD01004D2BA0 /* libz.dylib 
in Frameworks */, + FBBC53F9188BA82E004D2BA0 /* libopus.a in Frameworks */, + FBBC53F1188BA7FC004D2BA0 /* libavcodec.a in Frameworks */, + FBBC53F2188BA7FC004D2BA0 /* libavdevice.a in Frameworks */, + FBBC53F3188BA7FC004D2BA0 /* libavfilter.a in Frameworks */, + FBBC53F4188BA7FC004D2BA0 /* libavformat.a in Frameworks */, + FBBC53F5188BA7FC004D2BA0 /* libavresample.a in Frameworks */, + FBBC53F6188BA7FC004D2BA0 /* libavutil.a in Frameworks */, + FBBC53F7188BA7FC004D2BA0 /* libswresample.a in Frameworks */, + FBBC53F8188BA7FC004D2BA0 /* libswscale.a in Frameworks */, + FBF6AE77188A274100B50578 /* CoreGraphics.framework in Frameworks */, + FBF6AE7B188A274100B50578 /* CoreData.framework in Frameworks */, + FBF6AE79188A274100B50578 /* UIKit.framework in Frameworks */, + FBF6AE75188A274100B50578 /* Foundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FBF6AE8E188A274100B50578 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + FBF6AE93188A274100B50578 /* XCTest.framework in Frameworks */, + FBF6AE96188A274100B50578 /* CoreData.framework in Frameworks */, + FBF6AE95188A274100B50578 /* UIKit.framework in Frameworks */, + FBF6AE94188A274100B50578 /* Foundation.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + FB27273B188A8BB20093CCC9 /* libs */ = { + isa = PBXGroup; + children = ( + FBBC53CA188BA7C7004D2BA0 /* opus */, + FBBC53C8188BA7AF004D2BA0 /* ffmpeg */, + ); + path = libs; + sourceTree = ""; + }; + FB27273C188A8BB20093CCC9 /* armv7 */ = { + isa = PBXGroup; + children = ( + FB27273D188A8BB20093CCC9 /* include */, + FB272795188A8BB20093CCC9 /* lib */, + ); + path = armv7; + sourceTree = ""; + }; + FB27273D188A8BB20093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB27273E188A8BB20093CCC9 /* libavcodec */, + FB272748188A8BB20093CCC9 /* libavdevice */, + FB27274B188A8BB20093CCC9 /* libavfilter */, + FB272753188A8BB20093CCC9 /* libavformat */, + FB272757188A8BB20093CCC9 /* libavresample */, + FB27275A188A8BB20093CCC9 /* libavutil */, + FB27278F188A8BB20093CCC9 /* libswresample */, + FB272792188A8BB20093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB27273E188A8BB20093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB27273F188A8BB20093CCC9 /* avcodec.h */, + FB272740188A8BB20093CCC9 /* avfft.h */, + FB272741188A8BB20093CCC9 /* dxva2.h */, + FB272742188A8BB20093CCC9 /* old_codec_ids.h */, + FB272743188A8BB20093CCC9 /* vaapi.h */, + FB272744188A8BB20093CCC9 /* vda.h */, + FB272745188A8BB20093CCC9 /* vdpau.h */, + FB272746188A8BB20093CCC9 /* version.h */, + FB272747188A8BB20093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB272748188A8BB20093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB272749188A8BB20093CCC9 /* avdevice.h */, + FB27274A188A8BB20093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB27274B188A8BB20093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB27274C188A8BB20093CCC9 /* asrc_abuffer.h */, + FB27274D188A8BB20093CCC9 /* avcodec.h */, + FB27274E188A8BB20093CCC9 /* avfilter.h */, + FB27274F188A8BB20093CCC9 /* avfiltergraph.h */, + FB272750188A8BB20093CCC9 /* buffersink.h */, + FB272751188A8BB20093CCC9 /* buffersrc.h */, + FB272752188A8BB20093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272753188A8BB20093CCC9 /* libavformat */ = { + 
isa = PBXGroup; + children = ( + FB272754188A8BB20093CCC9 /* avformat.h */, + FB272755188A8BB20093CCC9 /* avio.h */, + FB272756188A8BB20093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB272757188A8BB20093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB272758188A8BB20093CCC9 /* avresample.h */, + FB272759188A8BB20093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB27275A188A8BB20093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB27275B188A8BB20093CCC9 /* adler32.h */, + FB27275C188A8BB20093CCC9 /* aes.h */, + FB27275D188A8BB20093CCC9 /* attributes.h */, + FB27275E188A8BB20093CCC9 /* audio_fifo.h */, + FB27275F188A8BB20093CCC9 /* audioconvert.h */, + FB272760188A8BB20093CCC9 /* avassert.h */, + FB272761188A8BB20093CCC9 /* avconfig.h */, + FB272762188A8BB20093CCC9 /* avstring.h */, + FB272763188A8BB20093CCC9 /* avutil.h */, + FB272764188A8BB20093CCC9 /* base64.h */, + FB272765188A8BB20093CCC9 /* blowfish.h */, + FB272766188A8BB20093CCC9 /* bprint.h */, + FB272767188A8BB20093CCC9 /* bswap.h */, + FB272768188A8BB20093CCC9 /* buffer.h */, + FB272769188A8BB20093CCC9 /* channel_layout.h */, + FB27276A188A8BB20093CCC9 /* common.h */, + FB27276B188A8BB20093CCC9 /* cpu.h */, + FB27276C188A8BB20093CCC9 /* crc.h */, + FB27276D188A8BB20093CCC9 /* dict.h */, + FB27276E188A8BB20093CCC9 /* error.h */, + FB27276F188A8BB20093CCC9 /* eval.h */, + FB272770188A8BB20093CCC9 /* fifo.h */, + FB272771188A8BB20093CCC9 /* file.h */, + FB272772188A8BB20093CCC9 /* frame.h */, + FB272773188A8BB20093CCC9 /* hmac.h */, + FB272774188A8BB20093CCC9 /* imgutils.h */, + FB272775188A8BB20093CCC9 /* intfloat.h */, + FB272776188A8BB20093CCC9 /* intfloat_readwrite.h */, + FB272777188A8BB20093CCC9 /* intreadwrite.h */, + FB272778188A8BB20093CCC9 /* lfg.h */, + FB272779188A8BB20093CCC9 /* log.h */, + FB27277A188A8BB20093CCC9 /* lzo.h */, + FB27277B188A8BB20093CCC9 /* mathematics.h */, + FB27277C188A8BB20093CCC9 /* md5.h */, + FB27277D188A8BB20093CCC9 /* mem.h */, + FB27277E188A8BB20093CCC9 /* murmur3.h */, + FB27277F188A8BB20093CCC9 /* old_pix_fmts.h */, + FB272780188A8BB20093CCC9 /* opt.h */, + FB272781188A8BB20093CCC9 /* parseutils.h */, + FB272782188A8BB20093CCC9 /* pixdesc.h */, + FB272783188A8BB20093CCC9 /* pixfmt.h */, + FB272784188A8BB20093CCC9 /* random_seed.h */, + FB272785188A8BB20093CCC9 /* rational.h */, + FB272786188A8BB20093CCC9 /* ripemd.h */, + FB272787188A8BB20093CCC9 /* samplefmt.h */, + FB272788188A8BB20093CCC9 /* sha.h */, + FB272789188A8BB20093CCC9 /* sha512.h */, + FB27278A188A8BB20093CCC9 /* time.h */, + FB27278B188A8BB20093CCC9 /* timecode.h */, + FB27278C188A8BB20093CCC9 /* timestamp.h */, + FB27278D188A8BB20093CCC9 /* version.h */, + FB27278E188A8BB20093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB27278F188A8BB20093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB272790188A8BB20093CCC9 /* swresample.h */, + FB272791188A8BB20093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB272792188A8BB20093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB272793188A8BB20093CCC9 /* swscale.h */, + FB272794188A8BB20093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB272795188A8BB20093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB272796188A8BB20093CCC9 /* libavcodec.a */, + FB272797188A8BB20093CCC9 /* libavdevice.a */, + FB272798188A8BB20093CCC9 /* libavfilter.a */, + FB272799188A8BB20093CCC9 /* libavformat.a 
*/, + FB27279A188A8BB20093CCC9 /* libavresample.a */, + FB27279B188A8BB20093CCC9 /* libavutil.a */, + FB27279C188A8BB20093CCC9 /* libswresample.a */, + FB27279D188A8BB20093CCC9 /* libswscale.a */, + FB27279E188A8BB20093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB27279E188A8BB20093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB27279F188A8BB20093CCC9 /* libavcodec.pc */, + FB2727A0188A8BB20093CCC9 /* libavdevice.pc */, + FB2727A1188A8BB20093CCC9 /* libavfilter.pc */, + FB2727A2188A8BB20093CCC9 /* libavformat.pc */, + FB2727A3188A8BB20093CCC9 /* libavresample.pc */, + FB2727A4188A8BB20093CCC9 /* libavutil.pc */, + FB2727A5188A8BB20093CCC9 /* libswresample.pc */, + FB2727A6188A8BB20093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FB2727A7188A8BB20093CCC9 /* armv7s */ = { + isa = PBXGroup; + children = ( + FB2727A8188A8BB20093CCC9 /* include */, + FB272800188A8BB20093CCC9 /* lib */, + ); + path = armv7s; + sourceTree = ""; + }; + FB2727A8188A8BB20093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB2727A9188A8BB20093CCC9 /* libavcodec */, + FB2727B3188A8BB20093CCC9 /* libavdevice */, + FB2727B6188A8BB20093CCC9 /* libavfilter */, + FB2727BE188A8BB20093CCC9 /* libavformat */, + FB2727C2188A8BB20093CCC9 /* libavresample */, + FB2727C5188A8BB20093CCC9 /* libavutil */, + FB2727FA188A8BB20093CCC9 /* libswresample */, + FB2727FD188A8BB20093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB2727A9188A8BB20093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB2727AA188A8BB20093CCC9 /* avcodec.h */, + FB2727AB188A8BB20093CCC9 /* avfft.h */, + FB2727AC188A8BB20093CCC9 /* dxva2.h */, + FB2727AD188A8BB20093CCC9 /* old_codec_ids.h */, + FB2727AE188A8BB20093CCC9 /* vaapi.h */, + FB2727AF188A8BB20093CCC9 /* vda.h */, + FB2727B0188A8BB20093CCC9 /* vdpau.h */, + FB2727B1188A8BB20093CCC9 /* version.h */, + FB2727B2188A8BB20093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB2727B3188A8BB20093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB2727B4188A8BB20093CCC9 /* avdevice.h */, + FB2727B5188A8BB20093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB2727B6188A8BB20093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB2727B7188A8BB20093CCC9 /* asrc_abuffer.h */, + FB2727B8188A8BB20093CCC9 /* avcodec.h */, + FB2727B9188A8BB20093CCC9 /* avfilter.h */, + FB2727BA188A8BB20093CCC9 /* avfiltergraph.h */, + FB2727BB188A8BB20093CCC9 /* buffersink.h */, + FB2727BC188A8BB20093CCC9 /* buffersrc.h */, + FB2727BD188A8BB20093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB2727BE188A8BB20093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB2727BF188A8BB20093CCC9 /* avformat.h */, + FB2727C0188A8BB20093CCC9 /* avio.h */, + FB2727C1188A8BB20093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB2727C2188A8BB20093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB2727C3188A8BB20093CCC9 /* avresample.h */, + FB2727C4188A8BB20093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB2727C5188A8BB20093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB2727C6188A8BB20093CCC9 /* adler32.h */, + FB2727C7188A8BB20093CCC9 /* aes.h */, + FB2727C8188A8BB20093CCC9 /* attributes.h */, + FB2727C9188A8BB20093CCC9 /* audio_fifo.h */, + FB2727CA188A8BB20093CCC9 /* audioconvert.h */, + FB2727CB188A8BB20093CCC9 /* avassert.h */, + 
FB2727CC188A8BB20093CCC9 /* avconfig.h */, + FB2727CD188A8BB20093CCC9 /* avstring.h */, + FB2727CE188A8BB20093CCC9 /* avutil.h */, + FB2727CF188A8BB20093CCC9 /* base64.h */, + FB2727D0188A8BB20093CCC9 /* blowfish.h */, + FB2727D1188A8BB20093CCC9 /* bprint.h */, + FB2727D2188A8BB20093CCC9 /* bswap.h */, + FB2727D3188A8BB20093CCC9 /* buffer.h */, + FB2727D4188A8BB20093CCC9 /* channel_layout.h */, + FB2727D5188A8BB20093CCC9 /* common.h */, + FB2727D6188A8BB20093CCC9 /* cpu.h */, + FB2727D7188A8BB20093CCC9 /* crc.h */, + FB2727D8188A8BB20093CCC9 /* dict.h */, + FB2727D9188A8BB20093CCC9 /* error.h */, + FB2727DA188A8BB20093CCC9 /* eval.h */, + FB2727DB188A8BB20093CCC9 /* fifo.h */, + FB2727DC188A8BB20093CCC9 /* file.h */, + FB2727DD188A8BB20093CCC9 /* frame.h */, + FB2727DE188A8BB20093CCC9 /* hmac.h */, + FB2727DF188A8BB20093CCC9 /* imgutils.h */, + FB2727E0188A8BB20093CCC9 /* intfloat.h */, + FB2727E1188A8BB20093CCC9 /* intfloat_readwrite.h */, + FB2727E2188A8BB20093CCC9 /* intreadwrite.h */, + FB2727E3188A8BB20093CCC9 /* lfg.h */, + FB2727E4188A8BB20093CCC9 /* log.h */, + FB2727E5188A8BB20093CCC9 /* lzo.h */, + FB2727E6188A8BB20093CCC9 /* mathematics.h */, + FB2727E7188A8BB20093CCC9 /* md5.h */, + FB2727E8188A8BB20093CCC9 /* mem.h */, + FB2727E9188A8BB20093CCC9 /* murmur3.h */, + FB2727EA188A8BB20093CCC9 /* old_pix_fmts.h */, + FB2727EB188A8BB20093CCC9 /* opt.h */, + FB2727EC188A8BB20093CCC9 /* parseutils.h */, + FB2727ED188A8BB20093CCC9 /* pixdesc.h */, + FB2727EE188A8BB20093CCC9 /* pixfmt.h */, + FB2727EF188A8BB20093CCC9 /* random_seed.h */, + FB2727F0188A8BB20093CCC9 /* rational.h */, + FB2727F1188A8BB20093CCC9 /* ripemd.h */, + FB2727F2188A8BB20093CCC9 /* samplefmt.h */, + FB2727F3188A8BB20093CCC9 /* sha.h */, + FB2727F4188A8BB20093CCC9 /* sha512.h */, + FB2727F5188A8BB20093CCC9 /* time.h */, + FB2727F6188A8BB20093CCC9 /* timecode.h */, + FB2727F7188A8BB20093CCC9 /* timestamp.h */, + FB2727F8188A8BB20093CCC9 /* version.h */, + FB2727F9188A8BB20093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB2727FA188A8BB20093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB2727FB188A8BB20093CCC9 /* swresample.h */, + FB2727FC188A8BB20093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB2727FD188A8BB20093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB2727FE188A8BB20093CCC9 /* swscale.h */, + FB2727FF188A8BB20093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB272800188A8BB20093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB272801188A8BB20093CCC9 /* libavcodec.a */, + FB272802188A8BB20093CCC9 /* libavdevice.a */, + FB272803188A8BB20093CCC9 /* libavfilter.a */, + FB272804188A8BB20093CCC9 /* libavformat.a */, + FB272805188A8BB20093CCC9 /* libavresample.a */, + FB272806188A8BB20093CCC9 /* libavutil.a */, + FB272807188A8BB20093CCC9 /* libswresample.a */, + FB272808188A8BB20093CCC9 /* libswscale.a */, + FB272809188A8BB20093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB272809188A8BB20093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB27280A188A8BB20093CCC9 /* libavcodec.pc */, + FB27280B188A8BB20093CCC9 /* libavdevice.pc */, + FB27280C188A8BB20093CCC9 /* libavfilter.pc */, + FB27280D188A8BB20093CCC9 /* libavformat.pc */, + FB27280E188A8BB20093CCC9 /* libavresample.pc */, + FB27280F188A8BB20093CCC9 /* libavutil.pc */, + FB272810188A8BB20093CCC9 /* libswresample.pc */, + FB272811188A8BB20093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = 
""; + }; + FB272812188A8BB20093CCC9 /* i386 */ = { + isa = PBXGroup; + children = ( + FB272813188A8BB20093CCC9 /* include */, + FB27286B188A8BB20093CCC9 /* lib */, + ); + path = i386; + sourceTree = ""; + }; + FB272813188A8BB20093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB272814188A8BB20093CCC9 /* libavcodec */, + FB27281E188A8BB20093CCC9 /* libavdevice */, + FB272821188A8BB20093CCC9 /* libavfilter */, + FB272829188A8BB20093CCC9 /* libavformat */, + FB27282D188A8BB20093CCC9 /* libavresample */, + FB272830188A8BB20093CCC9 /* libavutil */, + FB272865188A8BB20093CCC9 /* libswresample */, + FB272868188A8BB20093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB272814188A8BB20093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB272815188A8BB20093CCC9 /* avcodec.h */, + FB272816188A8BB20093CCC9 /* avfft.h */, + FB272817188A8BB20093CCC9 /* dxva2.h */, + FB272818188A8BB20093CCC9 /* old_codec_ids.h */, + FB272819188A8BB20093CCC9 /* vaapi.h */, + FB27281A188A8BB20093CCC9 /* vda.h */, + FB27281B188A8BB20093CCC9 /* vdpau.h */, + FB27281C188A8BB20093CCC9 /* version.h */, + FB27281D188A8BB20093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB27281E188A8BB20093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB27281F188A8BB20093CCC9 /* avdevice.h */, + FB272820188A8BB20093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB272821188A8BB20093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB272822188A8BB20093CCC9 /* asrc_abuffer.h */, + FB272823188A8BB20093CCC9 /* avcodec.h */, + FB272824188A8BB20093CCC9 /* avfilter.h */, + FB272825188A8BB20093CCC9 /* avfiltergraph.h */, + FB272826188A8BB20093CCC9 /* buffersink.h */, + FB272827188A8BB20093CCC9 /* buffersrc.h */, + FB272828188A8BB20093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272829188A8BB20093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB27282A188A8BB20093CCC9 /* avformat.h */, + FB27282B188A8BB20093CCC9 /* avio.h */, + FB27282C188A8BB20093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB27282D188A8BB20093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB27282E188A8BB20093CCC9 /* avresample.h */, + FB27282F188A8BB20093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB272830188A8BB20093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB272831188A8BB20093CCC9 /* adler32.h */, + FB272832188A8BB20093CCC9 /* aes.h */, + FB272833188A8BB20093CCC9 /* attributes.h */, + FB272834188A8BB20093CCC9 /* audio_fifo.h */, + FB272835188A8BB20093CCC9 /* audioconvert.h */, + FB272836188A8BB20093CCC9 /* avassert.h */, + FB272837188A8BB20093CCC9 /* avconfig.h */, + FB272838188A8BB20093CCC9 /* avstring.h */, + FB272839188A8BB20093CCC9 /* avutil.h */, + FB27283A188A8BB20093CCC9 /* base64.h */, + FB27283B188A8BB20093CCC9 /* blowfish.h */, + FB27283C188A8BB20093CCC9 /* bprint.h */, + FB27283D188A8BB20093CCC9 /* bswap.h */, + FB27283E188A8BB20093CCC9 /* buffer.h */, + FB27283F188A8BB20093CCC9 /* channel_layout.h */, + FB272840188A8BB20093CCC9 /* common.h */, + FB272841188A8BB20093CCC9 /* cpu.h */, + FB272842188A8BB20093CCC9 /* crc.h */, + FB272843188A8BB20093CCC9 /* dict.h */, + FB272844188A8BB20093CCC9 /* error.h */, + FB272845188A8BB20093CCC9 /* eval.h */, + FB272846188A8BB20093CCC9 /* fifo.h */, + FB272847188A8BB20093CCC9 /* file.h */, + FB272848188A8BB20093CCC9 /* frame.h */, + FB272849188A8BB20093CCC9 /* hmac.h */, 
+ FB27284A188A8BB20093CCC9 /* imgutils.h */, + FB27284B188A8BB20093CCC9 /* intfloat.h */, + FB27284C188A8BB20093CCC9 /* intfloat_readwrite.h */, + FB27284D188A8BB20093CCC9 /* intreadwrite.h */, + FB27284E188A8BB20093CCC9 /* lfg.h */, + FB27284F188A8BB20093CCC9 /* log.h */, + FB272850188A8BB20093CCC9 /* lzo.h */, + FB272851188A8BB20093CCC9 /* mathematics.h */, + FB272852188A8BB20093CCC9 /* md5.h */, + FB272853188A8BB20093CCC9 /* mem.h */, + FB272854188A8BB20093CCC9 /* murmur3.h */, + FB272855188A8BB20093CCC9 /* old_pix_fmts.h */, + FB272856188A8BB20093CCC9 /* opt.h */, + FB272857188A8BB20093CCC9 /* parseutils.h */, + FB272858188A8BB20093CCC9 /* pixdesc.h */, + FB272859188A8BB20093CCC9 /* pixfmt.h */, + FB27285A188A8BB20093CCC9 /* random_seed.h */, + FB27285B188A8BB20093CCC9 /* rational.h */, + FB27285C188A8BB20093CCC9 /* ripemd.h */, + FB27285D188A8BB20093CCC9 /* samplefmt.h */, + FB27285E188A8BB20093CCC9 /* sha.h */, + FB27285F188A8BB20093CCC9 /* sha512.h */, + FB272860188A8BB20093CCC9 /* time.h */, + FB272861188A8BB20093CCC9 /* timecode.h */, + FB272862188A8BB20093CCC9 /* timestamp.h */, + FB272863188A8BB20093CCC9 /* version.h */, + FB272864188A8BB20093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB272865188A8BB20093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB272866188A8BB20093CCC9 /* swresample.h */, + FB272867188A8BB20093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB272868188A8BB20093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB272869188A8BB20093CCC9 /* swscale.h */, + FB27286A188A8BB20093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB27286B188A8BB20093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB27286C188A8BB20093CCC9 /* libavcodec.a */, + FB27286D188A8BB20093CCC9 /* libavdevice.a */, + FB27286E188A8BB20093CCC9 /* libavfilter.a */, + FB27286F188A8BB20093CCC9 /* libavformat.a */, + FB272870188A8BB20093CCC9 /* libavresample.a */, + FB272871188A8BB20093CCC9 /* libavutil.a */, + FB272872188A8BB20093CCC9 /* libswresample.a */, + FB272873188A8BB20093CCC9 /* libswscale.a */, + FB272874188A8BB20093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB272874188A8BB20093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB272875188A8BB20093CCC9 /* libavcodec.pc */, + FB272876188A8BB20093CCC9 /* libavdevice.pc */, + FB272877188A8BB20093CCC9 /* libavfilter.pc */, + FB272878188A8BB20093CCC9 /* libavformat.pc */, + FB272879188A8BB20093CCC9 /* libavresample.pc */, + FB27287A188A8BB20093CCC9 /* libavutil.pc */, + FB27287B188A8BB20093CCC9 /* libswresample.pc */, + FB27287C188A8BB20093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FB27287D188A8BB20093CCC9 /* universal */ = { + isa = PBXGroup; + children = ( + FB27287E188A8BB20093CCC9 /* include */, + FB2728D6188A8BB20093CCC9 /* lib */, + ); + path = universal; + sourceTree = ""; + }; + FB27287E188A8BB20093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB27287F188A8BB20093CCC9 /* libavcodec */, + FB272889188A8BB20093CCC9 /* libavdevice */, + FB27288C188A8BB20093CCC9 /* libavfilter */, + FB272894188A8BB20093CCC9 /* libavformat */, + FB272898188A8BB20093CCC9 /* libavresample */, + FB27289B188A8BB20093CCC9 /* libavutil */, + FB2728D0188A8BB20093CCC9 /* libswresample */, + FB2728D3188A8BB20093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB27287F188A8BB20093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + 
FB272880188A8BB20093CCC9 /* avcodec.h */, + FB272881188A8BB20093CCC9 /* avfft.h */, + FB272882188A8BB20093CCC9 /* dxva2.h */, + FB272883188A8BB20093CCC9 /* old_codec_ids.h */, + FB272884188A8BB20093CCC9 /* vaapi.h */, + FB272885188A8BB20093CCC9 /* vda.h */, + FB272886188A8BB20093CCC9 /* vdpau.h */, + FB272887188A8BB20093CCC9 /* version.h */, + FB272888188A8BB20093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB272889188A8BB20093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB27288A188A8BB20093CCC9 /* avdevice.h */, + FB27288B188A8BB20093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB27288C188A8BB20093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB27288D188A8BB20093CCC9 /* asrc_abuffer.h */, + FB27288E188A8BB20093CCC9 /* avcodec.h */, + FB27288F188A8BB20093CCC9 /* avfilter.h */, + FB272890188A8BB20093CCC9 /* avfiltergraph.h */, + FB272891188A8BB20093CCC9 /* buffersink.h */, + FB272892188A8BB20093CCC9 /* buffersrc.h */, + FB272893188A8BB20093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272894188A8BB20093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB272895188A8BB20093CCC9 /* avformat.h */, + FB272896188A8BB20093CCC9 /* avio.h */, + FB272897188A8BB20093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB272898188A8BB20093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB272899188A8BB20093CCC9 /* avresample.h */, + FB27289A188A8BB20093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB27289B188A8BB20093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB27289C188A8BB20093CCC9 /* adler32.h */, + FB27289D188A8BB20093CCC9 /* aes.h */, + FB27289E188A8BB20093CCC9 /* attributes.h */, + FB27289F188A8BB20093CCC9 /* audio_fifo.h */, + FB2728A0188A8BB20093CCC9 /* audioconvert.h */, + FB2728A1188A8BB20093CCC9 /* avassert.h */, + FB2728A2188A8BB20093CCC9 /* avconfig.h */, + FB2728A3188A8BB20093CCC9 /* avstring.h */, + FB2728A4188A8BB20093CCC9 /* avutil.h */, + FB2728A5188A8BB20093CCC9 /* base64.h */, + FB2728A6188A8BB20093CCC9 /* blowfish.h */, + FB2728A7188A8BB20093CCC9 /* bprint.h */, + FB2728A8188A8BB20093CCC9 /* bswap.h */, + FB2728A9188A8BB20093CCC9 /* buffer.h */, + FB2728AA188A8BB20093CCC9 /* channel_layout.h */, + FB2728AB188A8BB20093CCC9 /* common.h */, + FB2728AC188A8BB20093CCC9 /* cpu.h */, + FB2728AD188A8BB20093CCC9 /* crc.h */, + FB2728AE188A8BB20093CCC9 /* dict.h */, + FB2728AF188A8BB20093CCC9 /* error.h */, + FB2728B0188A8BB20093CCC9 /* eval.h */, + FB2728B1188A8BB20093CCC9 /* fifo.h */, + FB2728B2188A8BB20093CCC9 /* file.h */, + FB2728B3188A8BB20093CCC9 /* frame.h */, + FB2728B4188A8BB20093CCC9 /* hmac.h */, + FB2728B5188A8BB20093CCC9 /* imgutils.h */, + FB2728B6188A8BB20093CCC9 /* intfloat.h */, + FB2728B7188A8BB20093CCC9 /* intfloat_readwrite.h */, + FB2728B8188A8BB20093CCC9 /* intreadwrite.h */, + FB2728B9188A8BB20093CCC9 /* lfg.h */, + FB2728BA188A8BB20093CCC9 /* log.h */, + FB2728BB188A8BB20093CCC9 /* lzo.h */, + FB2728BC188A8BB20093CCC9 /* mathematics.h */, + FB2728BD188A8BB20093CCC9 /* md5.h */, + FB2728BE188A8BB20093CCC9 /* mem.h */, + FB2728BF188A8BB20093CCC9 /* murmur3.h */, + FB2728C0188A8BB20093CCC9 /* old_pix_fmts.h */, + FB2728C1188A8BB20093CCC9 /* opt.h */, + FB2728C2188A8BB20093CCC9 /* parseutils.h */, + FB2728C3188A8BB20093CCC9 /* pixdesc.h */, + FB2728C4188A8BB20093CCC9 /* pixfmt.h */, + FB2728C5188A8BB20093CCC9 /* random_seed.h */, + FB2728C6188A8BB20093CCC9 
/* rational.h */, + FB2728C7188A8BB20093CCC9 /* ripemd.h */, + FB2728C8188A8BB20093CCC9 /* samplefmt.h */, + FB2728C9188A8BB20093CCC9 /* sha.h */, + FB2728CA188A8BB20093CCC9 /* sha512.h */, + FB2728CB188A8BB20093CCC9 /* time.h */, + FB2728CC188A8BB20093CCC9 /* timecode.h */, + FB2728CD188A8BB20093CCC9 /* timestamp.h */, + FB2728CE188A8BB20093CCC9 /* version.h */, + FB2728CF188A8BB20093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB2728D0188A8BB20093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB2728D1188A8BB20093CCC9 /* swresample.h */, + FB2728D2188A8BB20093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB2728D3188A8BB20093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB2728D4188A8BB20093CCC9 /* swscale.h */, + FB2728D5188A8BB20093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB2728D6188A8BB20093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB2728D7188A8BB20093CCC9 /* libavcodec.a */, + FB2728D8188A8BB20093CCC9 /* libavdevice.a */, + FB2728D9188A8BB20093CCC9 /* libavfilter.a */, + FB2728DA188A8BB20093CCC9 /* libavformat.a */, + FB2728DB188A8BB20093CCC9 /* libavresample.a */, + FB2728DC188A8BB20093CCC9 /* libavutil.a */, + FB2728DD188A8BB20093CCC9 /* libswresample.a */, + FB2728DE188A8BB20093CCC9 /* libswscale.a */, + ); + path = lib; + sourceTree = ""; + }; + FB272917188A93360093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272918188A93360093CCC9 /* AppDelegate.h */, + FB272919188A93360093CCC9 /* AppDelegate.m */, + FB27291A188A93360093CCC9 /* InfoPlist.strings */, + FB27291C188A93360093CCC9 /* Icons */, + FB27292E188A93360093CCC9 /* Images.xcassets */, + FB27292F188A93360093CCC9 /* libs */, + FB272AD3188A93370093CCC9 /* Limelight-iOS */, + FB272BBA188A93370093CCC9 /* Limelight-iOS-Info.plist */, + FB272BBB188A93370093CCC9 /* Limelight-iOS-Prefix.pch */, + FB272BBC188A93370093CCC9 /* Limelight_iOS.xcdatamodeld */, + FB272BBE188A93370093CCC9 /* main.m */, + FB272BBF188A93370093CCC9 /* MainFrameViewController.h */, + FB272BC0188A93370093CCC9 /* MainFrameViewController.m */, + FB272BC1188A93370093CCC9 /* notpadded.h264 */, + FB272BC2188A93370093CCC9 /* Video */, + FB272BC5188A93370093CCC9 /* VideoDecoder.h */, + FB272BC6188A93370093CCC9 /* VideoDecoder.m */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB27291C188A93360093CCC9 /* Icons */ = { + isa = PBXGroup; + children = ( + FB27291D188A93360093CCC9 /* ios7 */, + ); + path = Icons; + sourceTree = ""; + }; + FB27291D188A93360093CCC9 /* ios7 */ = { + isa = PBXGroup; + children = ( + FB27291E188A93360093CCC9 /* Icon-40.png */, + FB27291F188A93360093CCC9 /* Icon-40@2x.png */, + FB272920188A93360093CCC9 /* Icon-60.png */, + FB272921188A93360093CCC9 /* Icon-60@2x.png */, + FB272922188A93360093CCC9 /* Icon-72.png */, + FB272923188A93360093CCC9 /* Icon-72@2x.png */, + FB272924188A93360093CCC9 /* Icon-76.png */, + FB272925188A93360093CCC9 /* Icon-76@2x.png */, + FB272926188A93360093CCC9 /* Icon-Small-50.png */, + FB272927188A93360093CCC9 /* Icon-Small-50@2x.png */, + FB272928188A93360093CCC9 /* Icon-Small.png */, + FB272929188A93360093CCC9 /* Icon-Small@2x.png */, + FB27292A188A93360093CCC9 /* Icon.png */, + FB27292B188A93360093CCC9 /* Icon@2x.png */, + FB27292C188A93360093CCC9 /* iTunesArtwork.png */, + FB27292D188A93360093CCC9 /* iTunesArtwork@2x.png */, + ); + path = ios7; + sourceTree = ""; + }; + FB27292F188A93360093CCC9 /* libs */ = { + isa = PBXGroup; + children = ( + 
FB272930188A93360093CCC9 /* armv7 */, + FB27299B188A93360093CCC9 /* armv7s */, + FB272A06188A93370093CCC9 /* i386 */, + FB272A71188A93370093CCC9 /* universal */, + ); + path = libs; + sourceTree = ""; + }; + FB272930188A93360093CCC9 /* armv7 */ = { + isa = PBXGroup; + children = ( + FB272931188A93360093CCC9 /* include */, + FB272989188A93360093CCC9 /* lib */, + ); + path = armv7; + sourceTree = ""; + }; + FB272931188A93360093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB272932188A93360093CCC9 /* libavcodec */, + FB27293C188A93360093CCC9 /* libavdevice */, + FB27293F188A93360093CCC9 /* libavfilter */, + FB272947188A93360093CCC9 /* libavformat */, + FB27294B188A93360093CCC9 /* libavresample */, + FB27294E188A93360093CCC9 /* libavutil */, + FB272983188A93360093CCC9 /* libswresample */, + FB272986188A93360093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB272932188A93360093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB272933188A93360093CCC9 /* avcodec.h */, + FB272934188A93360093CCC9 /* avfft.h */, + FB272935188A93360093CCC9 /* dxva2.h */, + FB272936188A93360093CCC9 /* old_codec_ids.h */, + FB272937188A93360093CCC9 /* vaapi.h */, + FB272938188A93360093CCC9 /* vda.h */, + FB272939188A93360093CCC9 /* vdpau.h */, + FB27293A188A93360093CCC9 /* version.h */, + FB27293B188A93360093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB27293C188A93360093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB27293D188A93360093CCC9 /* avdevice.h */, + FB27293E188A93360093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB27293F188A93360093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB272940188A93360093CCC9 /* asrc_abuffer.h */, + FB272941188A93360093CCC9 /* avcodec.h */, + FB272942188A93360093CCC9 /* avfilter.h */, + FB272943188A93360093CCC9 /* avfiltergraph.h */, + FB272944188A93360093CCC9 /* buffersink.h */, + FB272945188A93360093CCC9 /* buffersrc.h */, + FB272946188A93360093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272947188A93360093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB272948188A93360093CCC9 /* avformat.h */, + FB272949188A93360093CCC9 /* avio.h */, + FB27294A188A93360093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB27294B188A93360093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB27294C188A93360093CCC9 /* avresample.h */, + FB27294D188A93360093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB27294E188A93360093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB27294F188A93360093CCC9 /* adler32.h */, + FB272950188A93360093CCC9 /* aes.h */, + FB272951188A93360093CCC9 /* attributes.h */, + FB272952188A93360093CCC9 /* audio_fifo.h */, + FB272953188A93360093CCC9 /* audioconvert.h */, + FB272954188A93360093CCC9 /* avassert.h */, + FB272955188A93360093CCC9 /* avconfig.h */, + FB272956188A93360093CCC9 /* avstring.h */, + FB272957188A93360093CCC9 /* avutil.h */, + FB272958188A93360093CCC9 /* base64.h */, + FB272959188A93360093CCC9 /* blowfish.h */, + FB27295A188A93360093CCC9 /* bprint.h */, + FB27295B188A93360093CCC9 /* bswap.h */, + FB27295C188A93360093CCC9 /* buffer.h */, + FB27295D188A93360093CCC9 /* channel_layout.h */, + FB27295E188A93360093CCC9 /* common.h */, + FB27295F188A93360093CCC9 /* cpu.h */, + FB272960188A93360093CCC9 /* crc.h */, + FB272961188A93360093CCC9 /* dict.h */, + FB272962188A93360093CCC9 /* error.h */, + 
FB272963188A93360093CCC9 /* eval.h */, + FB272964188A93360093CCC9 /* fifo.h */, + FB272965188A93360093CCC9 /* file.h */, + FB272966188A93360093CCC9 /* frame.h */, + FB272967188A93360093CCC9 /* hmac.h */, + FB272968188A93360093CCC9 /* imgutils.h */, + FB272969188A93360093CCC9 /* intfloat.h */, + FB27296A188A93360093CCC9 /* intfloat_readwrite.h */, + FB27296B188A93360093CCC9 /* intreadwrite.h */, + FB27296C188A93360093CCC9 /* lfg.h */, + FB27296D188A93360093CCC9 /* log.h */, + FB27296E188A93360093CCC9 /* lzo.h */, + FB27296F188A93360093CCC9 /* mathematics.h */, + FB272970188A93360093CCC9 /* md5.h */, + FB272971188A93360093CCC9 /* mem.h */, + FB272972188A93360093CCC9 /* murmur3.h */, + FB272973188A93360093CCC9 /* old_pix_fmts.h */, + FB272974188A93360093CCC9 /* opt.h */, + FB272975188A93360093CCC9 /* parseutils.h */, + FB272976188A93360093CCC9 /* pixdesc.h */, + FB272977188A93360093CCC9 /* pixfmt.h */, + FB272978188A93360093CCC9 /* random_seed.h */, + FB272979188A93360093CCC9 /* rational.h */, + FB27297A188A93360093CCC9 /* ripemd.h */, + FB27297B188A93360093CCC9 /* samplefmt.h */, + FB27297C188A93360093CCC9 /* sha.h */, + FB27297D188A93360093CCC9 /* sha512.h */, + FB27297E188A93360093CCC9 /* time.h */, + FB27297F188A93360093CCC9 /* timecode.h */, + FB272980188A93360093CCC9 /* timestamp.h */, + FB272981188A93360093CCC9 /* version.h */, + FB272982188A93360093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB272983188A93360093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB272984188A93360093CCC9 /* swresample.h */, + FB272985188A93360093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB272986188A93360093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB272987188A93360093CCC9 /* swscale.h */, + FB272988188A93360093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB272989188A93360093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB27298A188A93360093CCC9 /* libavcodec.a */, + FB27298B188A93360093CCC9 /* libavdevice.a */, + FB27298C188A93360093CCC9 /* libavfilter.a */, + FB27298D188A93360093CCC9 /* libavformat.a */, + FB27298E188A93360093CCC9 /* libavresample.a */, + FB27298F188A93360093CCC9 /* libavutil.a */, + FB272990188A93360093CCC9 /* libswresample.a */, + FB272991188A93360093CCC9 /* libswscale.a */, + FB272992188A93360093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB272992188A93360093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB272993188A93360093CCC9 /* libavcodec.pc */, + FB272994188A93360093CCC9 /* libavdevice.pc */, + FB272995188A93360093CCC9 /* libavfilter.pc */, + FB272996188A93360093CCC9 /* libavformat.pc */, + FB272997188A93360093CCC9 /* libavresample.pc */, + FB272998188A93360093CCC9 /* libavutil.pc */, + FB272999188A93360093CCC9 /* libswresample.pc */, + FB27299A188A93360093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FB27299B188A93360093CCC9 /* armv7s */ = { + isa = PBXGroup; + children = ( + FB27299C188A93360093CCC9 /* include */, + FB2729F4188A93370093CCC9 /* lib */, + ); + path = armv7s; + sourceTree = ""; + }; + FB27299C188A93360093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB27299D188A93360093CCC9 /* libavcodec */, + FB2729A7188A93360093CCC9 /* libavdevice */, + FB2729AA188A93360093CCC9 /* libavfilter */, + FB2729B2188A93360093CCC9 /* libavformat */, + FB2729B6188A93360093CCC9 /* libavresample */, + FB2729B9188A93360093CCC9 /* libavutil */, + FB2729EE188A93360093CCC9 /* 
libswresample */, + FB2729F1188A93360093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB27299D188A93360093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB27299E188A93360093CCC9 /* avcodec.h */, + FB27299F188A93360093CCC9 /* avfft.h */, + FB2729A0188A93360093CCC9 /* dxva2.h */, + FB2729A1188A93360093CCC9 /* old_codec_ids.h */, + FB2729A2188A93360093CCC9 /* vaapi.h */, + FB2729A3188A93360093CCC9 /* vda.h */, + FB2729A4188A93360093CCC9 /* vdpau.h */, + FB2729A5188A93360093CCC9 /* version.h */, + FB2729A6188A93360093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB2729A7188A93360093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB2729A8188A93360093CCC9 /* avdevice.h */, + FB2729A9188A93360093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB2729AA188A93360093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB2729AB188A93360093CCC9 /* asrc_abuffer.h */, + FB2729AC188A93360093CCC9 /* avcodec.h */, + FB2729AD188A93360093CCC9 /* avfilter.h */, + FB2729AE188A93360093CCC9 /* avfiltergraph.h */, + FB2729AF188A93360093CCC9 /* buffersink.h */, + FB2729B0188A93360093CCC9 /* buffersrc.h */, + FB2729B1188A93360093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB2729B2188A93360093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB2729B3188A93360093CCC9 /* avformat.h */, + FB2729B4188A93360093CCC9 /* avio.h */, + FB2729B5188A93360093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB2729B6188A93360093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB2729B7188A93360093CCC9 /* avresample.h */, + FB2729B8188A93360093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB2729B9188A93360093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB2729BA188A93360093CCC9 /* adler32.h */, + FB2729BB188A93360093CCC9 /* aes.h */, + FB2729BC188A93360093CCC9 /* attributes.h */, + FB2729BD188A93360093CCC9 /* audio_fifo.h */, + FB2729BE188A93360093CCC9 /* audioconvert.h */, + FB2729BF188A93360093CCC9 /* avassert.h */, + FB2729C0188A93360093CCC9 /* avconfig.h */, + FB2729C1188A93360093CCC9 /* avstring.h */, + FB2729C2188A93360093CCC9 /* avutil.h */, + FB2729C3188A93360093CCC9 /* base64.h */, + FB2729C4188A93360093CCC9 /* blowfish.h */, + FB2729C5188A93360093CCC9 /* bprint.h */, + FB2729C6188A93360093CCC9 /* bswap.h */, + FB2729C7188A93360093CCC9 /* buffer.h */, + FB2729C8188A93360093CCC9 /* channel_layout.h */, + FB2729C9188A93360093CCC9 /* common.h */, + FB2729CA188A93360093CCC9 /* cpu.h */, + FB2729CB188A93360093CCC9 /* crc.h */, + FB2729CC188A93360093CCC9 /* dict.h */, + FB2729CD188A93360093CCC9 /* error.h */, + FB2729CE188A93360093CCC9 /* eval.h */, + FB2729CF188A93360093CCC9 /* fifo.h */, + FB2729D0188A93360093CCC9 /* file.h */, + FB2729D1188A93360093CCC9 /* frame.h */, + FB2729D2188A93360093CCC9 /* hmac.h */, + FB2729D3188A93360093CCC9 /* imgutils.h */, + FB2729D4188A93360093CCC9 /* intfloat.h */, + FB2729D5188A93360093CCC9 /* intfloat_readwrite.h */, + FB2729D6188A93360093CCC9 /* intreadwrite.h */, + FB2729D7188A93360093CCC9 /* lfg.h */, + FB2729D8188A93360093CCC9 /* log.h */, + FB2729D9188A93360093CCC9 /* lzo.h */, + FB2729DA188A93360093CCC9 /* mathematics.h */, + FB2729DB188A93360093CCC9 /* md5.h */, + FB2729DC188A93360093CCC9 /* mem.h */, + FB2729DD188A93360093CCC9 /* murmur3.h */, + FB2729DE188A93360093CCC9 /* old_pix_fmts.h */, + FB2729DF188A93360093CCC9 /* opt.h */, + 
FB2729E0188A93360093CCC9 /* parseutils.h */, + FB2729E1188A93360093CCC9 /* pixdesc.h */, + FB2729E2188A93360093CCC9 /* pixfmt.h */, + FB2729E3188A93360093CCC9 /* random_seed.h */, + FB2729E4188A93360093CCC9 /* rational.h */, + FB2729E5188A93360093CCC9 /* ripemd.h */, + FB2729E6188A93360093CCC9 /* samplefmt.h */, + FB2729E7188A93360093CCC9 /* sha.h */, + FB2729E8188A93360093CCC9 /* sha512.h */, + FB2729E9188A93360093CCC9 /* time.h */, + FB2729EA188A93360093CCC9 /* timecode.h */, + FB2729EB188A93360093CCC9 /* timestamp.h */, + FB2729EC188A93360093CCC9 /* version.h */, + FB2729ED188A93360093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB2729EE188A93360093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB2729EF188A93360093CCC9 /* swresample.h */, + FB2729F0188A93360093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB2729F1188A93360093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB2729F2188A93360093CCC9 /* swscale.h */, + FB2729F3188A93360093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB2729F4188A93370093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB2729F5188A93370093CCC9 /* libavcodec.a */, + FB2729F6188A93370093CCC9 /* libavdevice.a */, + FB2729F7188A93370093CCC9 /* libavfilter.a */, + FB2729F8188A93370093CCC9 /* libavformat.a */, + FB2729F9188A93370093CCC9 /* libavresample.a */, + FB2729FA188A93370093CCC9 /* libavutil.a */, + FB2729FB188A93370093CCC9 /* libswresample.a */, + FB2729FC188A93370093CCC9 /* libswscale.a */, + FB2729FD188A93370093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB2729FD188A93370093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB2729FE188A93370093CCC9 /* libavcodec.pc */, + FB2729FF188A93370093CCC9 /* libavdevice.pc */, + FB272A00188A93370093CCC9 /* libavfilter.pc */, + FB272A01188A93370093CCC9 /* libavformat.pc */, + FB272A02188A93370093CCC9 /* libavresample.pc */, + FB272A03188A93370093CCC9 /* libavutil.pc */, + FB272A04188A93370093CCC9 /* libswresample.pc */, + FB272A05188A93370093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FB272A06188A93370093CCC9 /* i386 */ = { + isa = PBXGroup; + children = ( + FB272A07188A93370093CCC9 /* include */, + FB272A5F188A93370093CCC9 /* lib */, + ); + path = i386; + sourceTree = ""; + }; + FB272A07188A93370093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB272A08188A93370093CCC9 /* libavcodec */, + FB272A12188A93370093CCC9 /* libavdevice */, + FB272A15188A93370093CCC9 /* libavfilter */, + FB272A1D188A93370093CCC9 /* libavformat */, + FB272A21188A93370093CCC9 /* libavresample */, + FB272A24188A93370093CCC9 /* libavutil */, + FB272A59188A93370093CCC9 /* libswresample */, + FB272A5C188A93370093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB272A08188A93370093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB272A09188A93370093CCC9 /* avcodec.h */, + FB272A0A188A93370093CCC9 /* avfft.h */, + FB272A0B188A93370093CCC9 /* dxva2.h */, + FB272A0C188A93370093CCC9 /* old_codec_ids.h */, + FB272A0D188A93370093CCC9 /* vaapi.h */, + FB272A0E188A93370093CCC9 /* vda.h */, + FB272A0F188A93370093CCC9 /* vdpau.h */, + FB272A10188A93370093CCC9 /* version.h */, + FB272A11188A93370093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB272A12188A93370093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB272A13188A93370093CCC9 /* avdevice.h */, + FB272A14188A93370093CCC9 /* version.h 
*/, + ); + path = libavdevice; + sourceTree = ""; + }; + FB272A15188A93370093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB272A16188A93370093CCC9 /* asrc_abuffer.h */, + FB272A17188A93370093CCC9 /* avcodec.h */, + FB272A18188A93370093CCC9 /* avfilter.h */, + FB272A19188A93370093CCC9 /* avfiltergraph.h */, + FB272A1A188A93370093CCC9 /* buffersink.h */, + FB272A1B188A93370093CCC9 /* buffersrc.h */, + FB272A1C188A93370093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272A1D188A93370093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB272A1E188A93370093CCC9 /* avformat.h */, + FB272A1F188A93370093CCC9 /* avio.h */, + FB272A20188A93370093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FB272A21188A93370093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB272A22188A93370093CCC9 /* avresample.h */, + FB272A23188A93370093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB272A24188A93370093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB272A25188A93370093CCC9 /* adler32.h */, + FB272A26188A93370093CCC9 /* aes.h */, + FB272A27188A93370093CCC9 /* attributes.h */, + FB272A28188A93370093CCC9 /* audio_fifo.h */, + FB272A29188A93370093CCC9 /* audioconvert.h */, + FB272A2A188A93370093CCC9 /* avassert.h */, + FB272A2B188A93370093CCC9 /* avconfig.h */, + FB272A2C188A93370093CCC9 /* avstring.h */, + FB272A2D188A93370093CCC9 /* avutil.h */, + FB272A2E188A93370093CCC9 /* base64.h */, + FB272A2F188A93370093CCC9 /* blowfish.h */, + FB272A30188A93370093CCC9 /* bprint.h */, + FB272A31188A93370093CCC9 /* bswap.h */, + FB272A32188A93370093CCC9 /* buffer.h */, + FB272A33188A93370093CCC9 /* channel_layout.h */, + FB272A34188A93370093CCC9 /* common.h */, + FB272A35188A93370093CCC9 /* cpu.h */, + FB272A36188A93370093CCC9 /* crc.h */, + FB272A37188A93370093CCC9 /* dict.h */, + FB272A38188A93370093CCC9 /* error.h */, + FB272A39188A93370093CCC9 /* eval.h */, + FB272A3A188A93370093CCC9 /* fifo.h */, + FB272A3B188A93370093CCC9 /* file.h */, + FB272A3C188A93370093CCC9 /* frame.h */, + FB272A3D188A93370093CCC9 /* hmac.h */, + FB272A3E188A93370093CCC9 /* imgutils.h */, + FB272A3F188A93370093CCC9 /* intfloat.h */, + FB272A40188A93370093CCC9 /* intfloat_readwrite.h */, + FB272A41188A93370093CCC9 /* intreadwrite.h */, + FB272A42188A93370093CCC9 /* lfg.h */, + FB272A43188A93370093CCC9 /* log.h */, + FB272A44188A93370093CCC9 /* lzo.h */, + FB272A45188A93370093CCC9 /* mathematics.h */, + FB272A46188A93370093CCC9 /* md5.h */, + FB272A47188A93370093CCC9 /* mem.h */, + FB272A48188A93370093CCC9 /* murmur3.h */, + FB272A49188A93370093CCC9 /* old_pix_fmts.h */, + FB272A4A188A93370093CCC9 /* opt.h */, + FB272A4B188A93370093CCC9 /* parseutils.h */, + FB272A4C188A93370093CCC9 /* pixdesc.h */, + FB272A4D188A93370093CCC9 /* pixfmt.h */, + FB272A4E188A93370093CCC9 /* random_seed.h */, + FB272A4F188A93370093CCC9 /* rational.h */, + FB272A50188A93370093CCC9 /* ripemd.h */, + FB272A51188A93370093CCC9 /* samplefmt.h */, + FB272A52188A93370093CCC9 /* sha.h */, + FB272A53188A93370093CCC9 /* sha512.h */, + FB272A54188A93370093CCC9 /* time.h */, + FB272A55188A93370093CCC9 /* timecode.h */, + FB272A56188A93370093CCC9 /* timestamp.h */, + FB272A57188A93370093CCC9 /* version.h */, + FB272A58188A93370093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB272A59188A93370093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB272A5A188A93370093CCC9 /* swresample.h */, + 
FB272A5B188A93370093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB272A5C188A93370093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB272A5D188A93370093CCC9 /* swscale.h */, + FB272A5E188A93370093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB272A5F188A93370093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB272A60188A93370093CCC9 /* libavcodec.a */, + FB272A61188A93370093CCC9 /* libavdevice.a */, + FB272A62188A93370093CCC9 /* libavfilter.a */, + FB272A63188A93370093CCC9 /* libavformat.a */, + FB272A64188A93370093CCC9 /* libavresample.a */, + FB272A65188A93370093CCC9 /* libavutil.a */, + FB272A66188A93370093CCC9 /* libswresample.a */, + FB272A67188A93370093CCC9 /* libswscale.a */, + FB272A68188A93370093CCC9 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FB272A68188A93370093CCC9 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FB272A69188A93370093CCC9 /* libavcodec.pc */, + FB272A6A188A93370093CCC9 /* libavdevice.pc */, + FB272A6B188A93370093CCC9 /* libavfilter.pc */, + FB272A6C188A93370093CCC9 /* libavformat.pc */, + FB272A6D188A93370093CCC9 /* libavresample.pc */, + FB272A6E188A93370093CCC9 /* libavutil.pc */, + FB272A6F188A93370093CCC9 /* libswresample.pc */, + FB272A70188A93370093CCC9 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FB272A71188A93370093CCC9 /* universal */ = { + isa = PBXGroup; + children = ( + FB272A72188A93370093CCC9 /* include */, + FB272ACA188A93370093CCC9 /* lib */, + ); + path = universal; + sourceTree = ""; + }; + FB272A72188A93370093CCC9 /* include */ = { + isa = PBXGroup; + children = ( + FB272A73188A93370093CCC9 /* libavcodec */, + FB272A7D188A93370093CCC9 /* libavdevice */, + FB272A80188A93370093CCC9 /* libavfilter */, + FB272A88188A93370093CCC9 /* libavformat */, + FB272A8C188A93370093CCC9 /* libavresample */, + FB272A8F188A93370093CCC9 /* libavutil */, + FB272AC4188A93370093CCC9 /* libswresample */, + FB272AC7188A93370093CCC9 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FB272A73188A93370093CCC9 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FB272A74188A93370093CCC9 /* avcodec.h */, + FB272A75188A93370093CCC9 /* avfft.h */, + FB272A76188A93370093CCC9 /* dxva2.h */, + FB272A77188A93370093CCC9 /* old_codec_ids.h */, + FB272A78188A93370093CCC9 /* vaapi.h */, + FB272A79188A93370093CCC9 /* vda.h */, + FB272A7A188A93370093CCC9 /* vdpau.h */, + FB272A7B188A93370093CCC9 /* version.h */, + FB272A7C188A93370093CCC9 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FB272A7D188A93370093CCC9 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FB272A7E188A93370093CCC9 /* avdevice.h */, + FB272A7F188A93370093CCC9 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FB272A80188A93370093CCC9 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FB272A81188A93370093CCC9 /* asrc_abuffer.h */, + FB272A82188A93370093CCC9 /* avcodec.h */, + FB272A83188A93370093CCC9 /* avfilter.h */, + FB272A84188A93370093CCC9 /* avfiltergraph.h */, + FB272A85188A93370093CCC9 /* buffersink.h */, + FB272A86188A93370093CCC9 /* buffersrc.h */, + FB272A87188A93370093CCC9 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FB272A88188A93370093CCC9 /* libavformat */ = { + isa = PBXGroup; + children = ( + FB272A89188A93370093CCC9 /* avformat.h */, + FB272A8A188A93370093CCC9 /* avio.h */, + FB272A8B188A93370093CCC9 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + 
FB272A8C188A93370093CCC9 /* libavresample */ = { + isa = PBXGroup; + children = ( + FB272A8D188A93370093CCC9 /* avresample.h */, + FB272A8E188A93370093CCC9 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FB272A8F188A93370093CCC9 /* libavutil */ = { + isa = PBXGroup; + children = ( + FB272A90188A93370093CCC9 /* adler32.h */, + FB272A91188A93370093CCC9 /* aes.h */, + FB272A92188A93370093CCC9 /* attributes.h */, + FB272A93188A93370093CCC9 /* audio_fifo.h */, + FB272A94188A93370093CCC9 /* audioconvert.h */, + FB272A95188A93370093CCC9 /* avassert.h */, + FB272A96188A93370093CCC9 /* avconfig.h */, + FB272A97188A93370093CCC9 /* avstring.h */, + FB272A98188A93370093CCC9 /* avutil.h */, + FB272A99188A93370093CCC9 /* base64.h */, + FB272A9A188A93370093CCC9 /* blowfish.h */, + FB272A9B188A93370093CCC9 /* bprint.h */, + FB272A9C188A93370093CCC9 /* bswap.h */, + FB272A9D188A93370093CCC9 /* buffer.h */, + FB272A9E188A93370093CCC9 /* channel_layout.h */, + FB272A9F188A93370093CCC9 /* common.h */, + FB272AA0188A93370093CCC9 /* cpu.h */, + FB272AA1188A93370093CCC9 /* crc.h */, + FB272AA2188A93370093CCC9 /* dict.h */, + FB272AA3188A93370093CCC9 /* error.h */, + FB272AA4188A93370093CCC9 /* eval.h */, + FB272AA5188A93370093CCC9 /* fifo.h */, + FB272AA6188A93370093CCC9 /* file.h */, + FB272AA7188A93370093CCC9 /* frame.h */, + FB272AA8188A93370093CCC9 /* hmac.h */, + FB272AA9188A93370093CCC9 /* imgutils.h */, + FB272AAA188A93370093CCC9 /* intfloat.h */, + FB272AAB188A93370093CCC9 /* intfloat_readwrite.h */, + FB272AAC188A93370093CCC9 /* intreadwrite.h */, + FB272AAD188A93370093CCC9 /* lfg.h */, + FB272AAE188A93370093CCC9 /* log.h */, + FB272AAF188A93370093CCC9 /* lzo.h */, + FB272AB0188A93370093CCC9 /* mathematics.h */, + FB272AB1188A93370093CCC9 /* md5.h */, + FB272AB2188A93370093CCC9 /* mem.h */, + FB272AB3188A93370093CCC9 /* murmur3.h */, + FB272AB4188A93370093CCC9 /* old_pix_fmts.h */, + FB272AB5188A93370093CCC9 /* opt.h */, + FB272AB6188A93370093CCC9 /* parseutils.h */, + FB272AB7188A93370093CCC9 /* pixdesc.h */, + FB272AB8188A93370093CCC9 /* pixfmt.h */, + FB272AB9188A93370093CCC9 /* random_seed.h */, + FB272ABA188A93370093CCC9 /* rational.h */, + FB272ABB188A93370093CCC9 /* ripemd.h */, + FB272ABC188A93370093CCC9 /* samplefmt.h */, + FB272ABD188A93370093CCC9 /* sha.h */, + FB272ABE188A93370093CCC9 /* sha512.h */, + FB272ABF188A93370093CCC9 /* time.h */, + FB272AC0188A93370093CCC9 /* timecode.h */, + FB272AC1188A93370093CCC9 /* timestamp.h */, + FB272AC2188A93370093CCC9 /* version.h */, + FB272AC3188A93370093CCC9 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FB272AC4188A93370093CCC9 /* libswresample */ = { + isa = PBXGroup; + children = ( + FB272AC5188A93370093CCC9 /* swresample.h */, + FB272AC6188A93370093CCC9 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FB272AC7188A93370093CCC9 /* libswscale */ = { + isa = PBXGroup; + children = ( + FB272AC8188A93370093CCC9 /* swscale.h */, + FB272AC9188A93370093CCC9 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FB272ACA188A93370093CCC9 /* lib */ = { + isa = PBXGroup; + children = ( + FB272ACB188A93370093CCC9 /* libavcodec.a */, + FB272ACC188A93370093CCC9 /* libavdevice.a */, + FB272ACD188A93370093CCC9 /* libavfilter.a */, + FB272ACE188A93370093CCC9 /* libavformat.a */, + FB272ACF188A93370093CCC9 /* libavresample.a */, + FB272AD0188A93370093CCC9 /* libavutil.a */, + FB272AD1188A93370093CCC9 /* libswresample.a */, + FB272AD2188A93370093CCC9 /* libswscale.a */, + ); + path = lib; 
+ sourceTree = ""; + }; + FB272AD3188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AD4188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AD4188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AD5188A93370093CCC9 /* AppDelegate.h */, + FB272AD6188A93370093CCC9 /* AppDelegate.m */, + FB272AD7188A93370093CCC9 /* InfoPlist.strings */, + FB272AD9188A93370093CCC9 /* Images.xcassets */, + FB272ADA188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272ADA188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272ADB188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272ADB188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272ADC188A93370093CCC9 /* AppDelegate.h */, + FB272ADD188A93370093CCC9 /* AppDelegate.m */, + FB272ADE188A93370093CCC9 /* InfoPlist.strings */, + FB272AE0188A93370093CCC9 /* Images.xcassets */, + FB272AE1188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AE1188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AE2188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AE2188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AE3188A93370093CCC9 /* AppDelegate.h */, + FB272AE4188A93370093CCC9 /* AppDelegate.m */, + FB272AE5188A93370093CCC9 /* InfoPlist.strings */, + FB272AE7188A93370093CCC9 /* Images.xcassets */, + FB272AE8188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AE8188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AE9188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AE9188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AEA188A93370093CCC9 /* AppDelegate.h */, + FB272AEB188A93370093CCC9 /* AppDelegate.m */, + FB272AEC188A93370093CCC9 /* InfoPlist.strings */, + FB272AEE188A93370093CCC9 /* Images.xcassets */, + FB272AEF188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AEF188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AF0188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AF0188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AF1188A93370093CCC9 /* AppDelegate.h */, + FB272AF2188A93370093CCC9 /* AppDelegate.m */, + FB272AF3188A93370093CCC9 /* InfoPlist.strings */, + FB272AF5188A93370093CCC9 /* Images.xcassets */, + FB272AF6188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AF6188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AF7188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AF7188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AF8188A93370093CCC9 /* AppDelegate.h */, + FB272AF9188A93370093CCC9 /* AppDelegate.m */, + FB272AFA188A93370093CCC9 /* InfoPlist.strings */, + FB272AFC188A93370093CCC9 /* Images.xcassets */, + FB272AFD188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AFD188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + 
FB272AFE188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272AFE188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272AFF188A93370093CCC9 /* AppDelegate.h */, + FB272B00188A93370093CCC9 /* AppDelegate.m */, + FB272B01188A93370093CCC9 /* InfoPlist.strings */, + FB272B03188A93370093CCC9 /* Images.xcassets */, + FB272B04188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B04188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B05188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B05188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B06188A93370093CCC9 /* AppDelegate.h */, + FB272B07188A93370093CCC9 /* AppDelegate.m */, + FB272B08188A93370093CCC9 /* InfoPlist.strings */, + FB272B0A188A93370093CCC9 /* Images.xcassets */, + FB272B0B188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B0B188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B0C188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B0C188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B0D188A93370093CCC9 /* AppDelegate.h */, + FB272B0E188A93370093CCC9 /* AppDelegate.m */, + FB272B0F188A93370093CCC9 /* InfoPlist.strings */, + FB272B11188A93370093CCC9 /* Images.xcassets */, + FB272B12188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B12188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B13188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B13188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B14188A93370093CCC9 /* AppDelegate.h */, + FB272B15188A93370093CCC9 /* AppDelegate.m */, + FB272B16188A93370093CCC9 /* InfoPlist.strings */, + FB272B18188A93370093CCC9 /* Images.xcassets */, + FB272B19188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B19188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B1A188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B1A188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B1B188A93370093CCC9 /* AppDelegate.h */, + FB272B1C188A93370093CCC9 /* AppDelegate.m */, + FB272B1D188A93370093CCC9 /* InfoPlist.strings */, + FB272B1F188A93370093CCC9 /* Images.xcassets */, + FB272B20188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B20188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B21188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B21188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B22188A93370093CCC9 /* AppDelegate.h */, + FB272B23188A93370093CCC9 /* AppDelegate.m */, + FB272B24188A93370093CCC9 /* InfoPlist.strings */, + FB272B26188A93370093CCC9 /* Images.xcassets */, + FB272B27188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B27188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B28188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + 
FB272B28188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B29188A93370093CCC9 /* AppDelegate.h */, + FB272B2A188A93370093CCC9 /* AppDelegate.m */, + FB272B2B188A93370093CCC9 /* InfoPlist.strings */, + FB272B2D188A93370093CCC9 /* Images.xcassets */, + FB272B2E188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B2E188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B2F188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B2F188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B30188A93370093CCC9 /* AppDelegate.h */, + FB272B31188A93370093CCC9 /* AppDelegate.m */, + FB272B32188A93370093CCC9 /* InfoPlist.strings */, + FB272B34188A93370093CCC9 /* Images.xcassets */, + FB272B35188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B35188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B36188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B36188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B37188A93370093CCC9 /* AppDelegate.h */, + FB272B38188A93370093CCC9 /* AppDelegate.m */, + FB272B39188A93370093CCC9 /* InfoPlist.strings */, + FB272B3B188A93370093CCC9 /* Images.xcassets */, + FB272B3C188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B3C188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B3D188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B3D188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B3E188A93370093CCC9 /* AppDelegate.h */, + FB272B3F188A93370093CCC9 /* AppDelegate.m */, + FB272B40188A93370093CCC9 /* InfoPlist.strings */, + FB272B42188A93370093CCC9 /* Images.xcassets */, + FB272B43188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B43188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B44188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B44188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B45188A93370093CCC9 /* AppDelegate.h */, + FB272B46188A93370093CCC9 /* AppDelegate.m */, + FB272B47188A93370093CCC9 /* InfoPlist.strings */, + FB272B49188A93370093CCC9 /* Images.xcassets */, + FB272B4A188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B4A188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B4B188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B4B188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B4C188A93370093CCC9 /* AppDelegate.h */, + FB272B4D188A93370093CCC9 /* AppDelegate.m */, + FB272B4E188A93370093CCC9 /* InfoPlist.strings */, + FB272B50188A93370093CCC9 /* Images.xcassets */, + FB272B51188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B51188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B52188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B52188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B53188A93370093CCC9 /* 
AppDelegate.h */, + FB272B54188A93370093CCC9 /* AppDelegate.m */, + FB272B55188A93370093CCC9 /* InfoPlist.strings */, + FB272B57188A93370093CCC9 /* Images.xcassets */, + FB272B58188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B58188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B59188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B59188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B5A188A93370093CCC9 /* AppDelegate.h */, + FB272B5B188A93370093CCC9 /* AppDelegate.m */, + FB272B5C188A93370093CCC9 /* InfoPlist.strings */, + FB272B5E188A93370093CCC9 /* Images.xcassets */, + FB272B5F188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B5F188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B60188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B60188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B61188A93370093CCC9 /* AppDelegate.h */, + FB272B62188A93370093CCC9 /* AppDelegate.m */, + FB272B63188A93370093CCC9 /* InfoPlist.strings */, + FB272B65188A93370093CCC9 /* Images.xcassets */, + FB272B66188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B66188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B67188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B67188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B68188A93370093CCC9 /* AppDelegate.h */, + FB272B69188A93370093CCC9 /* AppDelegate.m */, + FB272B6A188A93370093CCC9 /* InfoPlist.strings */, + FB272B6C188A93370093CCC9 /* Images.xcassets */, + FB272B6D188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B6D188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B6E188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B6E188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B6F188A93370093CCC9 /* AppDelegate.h */, + FB272B70188A93370093CCC9 /* AppDelegate.m */, + FB272B71188A93370093CCC9 /* InfoPlist.strings */, + FB272B73188A93370093CCC9 /* Images.xcassets */, + FB272B74188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B74188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B75188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B75188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B76188A93370093CCC9 /* AppDelegate.h */, + FB272B77188A93370093CCC9 /* AppDelegate.m */, + FB272B78188A93370093CCC9 /* InfoPlist.strings */, + FB272B7A188A93370093CCC9 /* Images.xcassets */, + FB272B7B188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B7B188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B7C188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B7C188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B7D188A93370093CCC9 /* AppDelegate.h */, + FB272B7E188A93370093CCC9 /* AppDelegate.m */, + FB272B7F188A93370093CCC9 /* InfoPlist.strings */, + 
FB272B81188A93370093CCC9 /* Images.xcassets */, + FB272B82188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B82188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B83188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B83188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B84188A93370093CCC9 /* AppDelegate.h */, + FB272B85188A93370093CCC9 /* AppDelegate.m */, + FB272B86188A93370093CCC9 /* InfoPlist.strings */, + FB272B88188A93370093CCC9 /* Images.xcassets */, + FB272B89188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B89188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B8A188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B8A188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B8B188A93370093CCC9 /* AppDelegate.h */, + FB272B8C188A93370093CCC9 /* AppDelegate.m */, + FB272B8D188A93370093CCC9 /* InfoPlist.strings */, + FB272B8F188A93370093CCC9 /* Images.xcassets */, + FB272B90188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B90188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B91188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B91188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B92188A93370093CCC9 /* AppDelegate.h */, + FB272B93188A93370093CCC9 /* AppDelegate.m */, + FB272B94188A93370093CCC9 /* InfoPlist.strings */, + FB272B96188A93370093CCC9 /* Images.xcassets */, + FB272B97188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B97188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B98188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B98188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B99188A93370093CCC9 /* AppDelegate.h */, + FB272B9A188A93370093CCC9 /* AppDelegate.m */, + FB272B9B188A93370093CCC9 /* InfoPlist.strings */, + FB272B9D188A93370093CCC9 /* Images.xcassets */, + FB272B9E188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B9E188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272B9F188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272B9F188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BA0188A93370093CCC9 /* AppDelegate.h */, + FB272BA1188A93370093CCC9 /* AppDelegate.m */, + FB272BA2188A93370093CCC9 /* InfoPlist.strings */, + FB272BA4188A93370093CCC9 /* Images.xcassets */, + FB272BA5188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BA5188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BA6188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BA6188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BA7188A93370093CCC9 /* AppDelegate.h */, + FB272BA8188A93370093CCC9 /* AppDelegate.m */, + FB272BA9188A93370093CCC9 /* InfoPlist.strings */, + FB272BAB188A93370093CCC9 /* Images.xcassets */, + FB272BAC188A93370093CCC9 /* Limelight-iOS */, + ); + path = 
"Limelight-iOS"; + sourceTree = ""; + }; + FB272BAC188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BAD188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BAD188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BAE188A93370093CCC9 /* AppDelegate.h */, + FB272BAF188A93370093CCC9 /* AppDelegate.m */, + FB272BB0188A93370093CCC9 /* InfoPlist.strings */, + FB272BB2188A93370093CCC9 /* Images.xcassets */, + FB272BB3188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BB3188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BB4188A93370093CCC9 /* Limelight-iOS */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BB4188A93370093CCC9 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB272BB5188A93370093CCC9 /* AppDelegate.h */, + FB272BB6188A93370093CCC9 /* AppDelegate.m */, + FB272BB7188A93370093CCC9 /* InfoPlist.strings */, + FB272BB9188A93370093CCC9 /* Images.xcassets */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FB272BC2188A93370093CCC9 /* Video */ = { + isa = PBXGroup; + children = ( + FB272BC3188A93370093CCC9 /* VideoDepacketizer.h */, + FB272BC4188A93370093CCC9 /* VideoDepacketizer.m */, + ); + path = Video; + sourceTree = ""; + }; + FB4E9485188BC39600CCC583 /* Debug-iphoneos */ = { + isa = PBXGroup; + children = ( + FB4E9486188BC39600CCC583 /* liblimelight-common.a */, + FB4E9487188BC39600CCC583 /* liblimelight-common.a.dSYM */, + ); + name = "Debug-iphoneos"; + path = "../../../Library/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos"; + sourceTree = ""; + }; + FBBC52C6188ACA92004D2BA0 /* armv7s */ = { + isa = PBXGroup; + children = ( + FBBC52C7188ACA92004D2BA0 /* include */, + FBBC531F188ACA93004D2BA0 /* lib */, + ); + name = armv7s; + path = "Limelight-iOS/libs/armv7s"; + sourceTree = ""; + }; + FBBC52C7188ACA92004D2BA0 /* include */ = { + isa = PBXGroup; + children = ( + FBBC52C8188ACA92004D2BA0 /* libavcodec */, + FBBC52D2188ACA92004D2BA0 /* libavdevice */, + FBBC52D5188ACA92004D2BA0 /* libavfilter */, + FBBC52DD188ACA92004D2BA0 /* libavformat */, + FBBC52E1188ACA92004D2BA0 /* libavresample */, + FBBC52E4188ACA92004D2BA0 /* libavutil */, + FBBC5319188ACA93004D2BA0 /* libswresample */, + FBBC531C188ACA93004D2BA0 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FBBC52C8188ACA92004D2BA0 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FBBC52C9188ACA92004D2BA0 /* avcodec.h */, + FBBC52CA188ACA92004D2BA0 /* avfft.h */, + FBBC52CB188ACA92004D2BA0 /* dxva2.h */, + FBBC52CC188ACA92004D2BA0 /* old_codec_ids.h */, + FBBC52CD188ACA92004D2BA0 /* vaapi.h */, + FBBC52CE188ACA92004D2BA0 /* vda.h */, + FBBC52CF188ACA92004D2BA0 /* vdpau.h */, + FBBC52D0188ACA92004D2BA0 /* version.h */, + FBBC52D1188ACA92004D2BA0 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FBBC52D2188ACA92004D2BA0 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FBBC52D3188ACA92004D2BA0 /* avdevice.h */, + FBBC52D4188ACA92004D2BA0 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FBBC52D5188ACA92004D2BA0 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FBBC52D6188ACA92004D2BA0 /* asrc_abuffer.h */, + FBBC52D7188ACA92004D2BA0 /* avcodec.h */, + FBBC52D8188ACA92004D2BA0 /* avfilter.h */, + FBBC52D9188ACA92004D2BA0 /* avfiltergraph.h */, + FBBC52DA188ACA92004D2BA0 /* buffersink.h */, + 
FBBC52DB188ACA92004D2BA0 /* buffersrc.h */, + FBBC52DC188ACA92004D2BA0 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FBBC52DD188ACA92004D2BA0 /* libavformat */ = { + isa = PBXGroup; + children = ( + FBBC52DE188ACA92004D2BA0 /* avformat.h */, + FBBC52DF188ACA92004D2BA0 /* avio.h */, + FBBC52E0188ACA92004D2BA0 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FBBC52E1188ACA92004D2BA0 /* libavresample */ = { + isa = PBXGroup; + children = ( + FBBC52E2188ACA92004D2BA0 /* avresample.h */, + FBBC52E3188ACA92004D2BA0 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FBBC52E4188ACA92004D2BA0 /* libavutil */ = { + isa = PBXGroup; + children = ( + FBBC52E5188ACA92004D2BA0 /* adler32.h */, + FBBC52E6188ACA92004D2BA0 /* aes.h */, + FBBC52E7188ACA92004D2BA0 /* attributes.h */, + FBBC52E8188ACA92004D2BA0 /* audio_fifo.h */, + FBBC52E9188ACA92004D2BA0 /* audioconvert.h */, + FBBC52EA188ACA92004D2BA0 /* avassert.h */, + FBBC52EB188ACA92004D2BA0 /* avconfig.h */, + FBBC52EC188ACA92004D2BA0 /* avstring.h */, + FBBC52ED188ACA92004D2BA0 /* avutil.h */, + FBBC52EE188ACA93004D2BA0 /* base64.h */, + FBBC52EF188ACA93004D2BA0 /* blowfish.h */, + FBBC52F0188ACA93004D2BA0 /* bprint.h */, + FBBC52F1188ACA93004D2BA0 /* bswap.h */, + FBBC52F2188ACA93004D2BA0 /* buffer.h */, + FBBC52F3188ACA93004D2BA0 /* channel_layout.h */, + FBBC52F4188ACA93004D2BA0 /* common.h */, + FBBC52F5188ACA93004D2BA0 /* cpu.h */, + FBBC52F6188ACA93004D2BA0 /* crc.h */, + FBBC52F7188ACA93004D2BA0 /* dict.h */, + FBBC52F8188ACA93004D2BA0 /* error.h */, + FBBC52F9188ACA93004D2BA0 /* eval.h */, + FBBC52FA188ACA93004D2BA0 /* fifo.h */, + FBBC52FB188ACA93004D2BA0 /* file.h */, + FBBC52FC188ACA93004D2BA0 /* frame.h */, + FBBC52FD188ACA93004D2BA0 /* hmac.h */, + FBBC52FE188ACA93004D2BA0 /* imgutils.h */, + FBBC52FF188ACA93004D2BA0 /* intfloat.h */, + FBBC5300188ACA93004D2BA0 /* intfloat_readwrite.h */, + FBBC5301188ACA93004D2BA0 /* intreadwrite.h */, + FBBC5302188ACA93004D2BA0 /* lfg.h */, + FBBC5303188ACA93004D2BA0 /* log.h */, + FBBC5304188ACA93004D2BA0 /* lzo.h */, + FBBC5305188ACA93004D2BA0 /* mathematics.h */, + FBBC5306188ACA93004D2BA0 /* md5.h */, + FBBC5307188ACA93004D2BA0 /* mem.h */, + FBBC5308188ACA93004D2BA0 /* murmur3.h */, + FBBC5309188ACA93004D2BA0 /* old_pix_fmts.h */, + FBBC530A188ACA93004D2BA0 /* opt.h */, + FBBC530B188ACA93004D2BA0 /* parseutils.h */, + FBBC530C188ACA93004D2BA0 /* pixdesc.h */, + FBBC530D188ACA93004D2BA0 /* pixfmt.h */, + FBBC530E188ACA93004D2BA0 /* random_seed.h */, + FBBC530F188ACA93004D2BA0 /* rational.h */, + FBBC5310188ACA93004D2BA0 /* ripemd.h */, + FBBC5311188ACA93004D2BA0 /* samplefmt.h */, + FBBC5312188ACA93004D2BA0 /* sha.h */, + FBBC5313188ACA93004D2BA0 /* sha512.h */, + FBBC5314188ACA93004D2BA0 /* time.h */, + FBBC5315188ACA93004D2BA0 /* timecode.h */, + FBBC5316188ACA93004D2BA0 /* timestamp.h */, + FBBC5317188ACA93004D2BA0 /* version.h */, + FBBC5318188ACA93004D2BA0 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FBBC5319188ACA93004D2BA0 /* libswresample */ = { + isa = PBXGroup; + children = ( + FBBC531A188ACA93004D2BA0 /* swresample.h */, + FBBC531B188ACA93004D2BA0 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FBBC531C188ACA93004D2BA0 /* libswscale */ = { + isa = PBXGroup; + children = ( + FBBC531D188ACA93004D2BA0 /* swscale.h */, + FBBC531E188ACA93004D2BA0 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FBBC531F188ACA93004D2BA0 /* lib */ = { + isa = PBXGroup; + children = ( 
+ FBBC5320188ACA93004D2BA0 /* libavcodec.a */, + FBBC5321188ACA93004D2BA0 /* libavdevice.a */, + FBBC5322188ACA93004D2BA0 /* libavfilter.a */, + FBBC5323188ACA93004D2BA0 /* libavformat.a */, + FBBC5324188ACA93004D2BA0 /* libavresample.a */, + FBBC5325188ACA93004D2BA0 /* libavutil.a */, + FBBC5326188ACA93004D2BA0 /* libswresample.a */, + FBBC5327188ACA93004D2BA0 /* libswscale.a */, + FBBC5328188ACA93004D2BA0 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FBBC5328188ACA93004D2BA0 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FBBC5329188ACA93004D2BA0 /* libavcodec.pc */, + FBBC532A188ACA93004D2BA0 /* libavdevice.pc */, + FBBC532B188ACA93004D2BA0 /* libavfilter.pc */, + FBBC532C188ACA93004D2BA0 /* libavformat.pc */, + FBBC532D188ACA93004D2BA0 /* libavresample.pc */, + FBBC532E188ACA93004D2BA0 /* libavutil.pc */, + FBBC532F188ACA93004D2BA0 /* libswresample.pc */, + FBBC5330188ACA93004D2BA0 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FBBC5331188ACA99004D2BA0 /* armv7s */ = { + isa = PBXGroup; + children = ( + FBBC5332188ACA99004D2BA0 /* include */, + FBBC538A188ACA99004D2BA0 /* lib */, + ); + name = armv7s; + path = "Limelight-iOS/libs/armv7s"; + sourceTree = ""; + }; + FBBC5332188ACA99004D2BA0 /* include */ = { + isa = PBXGroup; + children = ( + FBBC5333188ACA99004D2BA0 /* libavcodec */, + FBBC533D188ACA99004D2BA0 /* libavdevice */, + FBBC5340188ACA99004D2BA0 /* libavfilter */, + FBBC5348188ACA99004D2BA0 /* libavformat */, + FBBC534C188ACA99004D2BA0 /* libavresample */, + FBBC534F188ACA99004D2BA0 /* libavutil */, + FBBC5384188ACA99004D2BA0 /* libswresample */, + FBBC5387188ACA99004D2BA0 /* libswscale */, + ); + path = include; + sourceTree = ""; + }; + FBBC5333188ACA99004D2BA0 /* libavcodec */ = { + isa = PBXGroup; + children = ( + FBBC5334188ACA99004D2BA0 /* avcodec.h */, + FBBC5335188ACA99004D2BA0 /* avfft.h */, + FBBC5336188ACA99004D2BA0 /* dxva2.h */, + FBBC5337188ACA99004D2BA0 /* old_codec_ids.h */, + FBBC5338188ACA99004D2BA0 /* vaapi.h */, + FBBC5339188ACA99004D2BA0 /* vda.h */, + FBBC533A188ACA99004D2BA0 /* vdpau.h */, + FBBC533B188ACA99004D2BA0 /* version.h */, + FBBC533C188ACA99004D2BA0 /* xvmc.h */, + ); + path = libavcodec; + sourceTree = ""; + }; + FBBC533D188ACA99004D2BA0 /* libavdevice */ = { + isa = PBXGroup; + children = ( + FBBC533E188ACA99004D2BA0 /* avdevice.h */, + FBBC533F188ACA99004D2BA0 /* version.h */, + ); + path = libavdevice; + sourceTree = ""; + }; + FBBC5340188ACA99004D2BA0 /* libavfilter */ = { + isa = PBXGroup; + children = ( + FBBC5341188ACA99004D2BA0 /* asrc_abuffer.h */, + FBBC5342188ACA99004D2BA0 /* avcodec.h */, + FBBC5343188ACA99004D2BA0 /* avfilter.h */, + FBBC5344188ACA99004D2BA0 /* avfiltergraph.h */, + FBBC5345188ACA99004D2BA0 /* buffersink.h */, + FBBC5346188ACA99004D2BA0 /* buffersrc.h */, + FBBC5347188ACA99004D2BA0 /* version.h */, + ); + path = libavfilter; + sourceTree = ""; + }; + FBBC5348188ACA99004D2BA0 /* libavformat */ = { + isa = PBXGroup; + children = ( + FBBC5349188ACA99004D2BA0 /* avformat.h */, + FBBC534A188ACA99004D2BA0 /* avio.h */, + FBBC534B188ACA99004D2BA0 /* version.h */, + ); + path = libavformat; + sourceTree = ""; + }; + FBBC534C188ACA99004D2BA0 /* libavresample */ = { + isa = PBXGroup; + children = ( + FBBC534D188ACA99004D2BA0 /* avresample.h */, + FBBC534E188ACA99004D2BA0 /* version.h */, + ); + path = libavresample; + sourceTree = ""; + }; + FBBC534F188ACA99004D2BA0 /* libavutil */ = { + isa = PBXGroup; + children = ( + FBBC5350188ACA99004D2BA0 /* adler32.h */, + 
FBBC5351188ACA99004D2BA0 /* aes.h */, + FBBC5352188ACA99004D2BA0 /* attributes.h */, + FBBC5353188ACA99004D2BA0 /* audio_fifo.h */, + FBBC5354188ACA99004D2BA0 /* audioconvert.h */, + FBBC5355188ACA99004D2BA0 /* avassert.h */, + FBBC5356188ACA99004D2BA0 /* avconfig.h */, + FBBC5357188ACA99004D2BA0 /* avstring.h */, + FBBC5358188ACA99004D2BA0 /* avutil.h */, + FBBC5359188ACA99004D2BA0 /* base64.h */, + FBBC535A188ACA99004D2BA0 /* blowfish.h */, + FBBC535B188ACA99004D2BA0 /* bprint.h */, + FBBC535C188ACA99004D2BA0 /* bswap.h */, + FBBC535D188ACA99004D2BA0 /* buffer.h */, + FBBC535E188ACA99004D2BA0 /* channel_layout.h */, + FBBC535F188ACA99004D2BA0 /* common.h */, + FBBC5360188ACA99004D2BA0 /* cpu.h */, + FBBC5361188ACA99004D2BA0 /* crc.h */, + FBBC5362188ACA99004D2BA0 /* dict.h */, + FBBC5363188ACA99004D2BA0 /* error.h */, + FBBC5364188ACA99004D2BA0 /* eval.h */, + FBBC5365188ACA99004D2BA0 /* fifo.h */, + FBBC5366188ACA99004D2BA0 /* file.h */, + FBBC5367188ACA99004D2BA0 /* frame.h */, + FBBC5368188ACA99004D2BA0 /* hmac.h */, + FBBC5369188ACA99004D2BA0 /* imgutils.h */, + FBBC536A188ACA99004D2BA0 /* intfloat.h */, + FBBC536B188ACA99004D2BA0 /* intfloat_readwrite.h */, + FBBC536C188ACA99004D2BA0 /* intreadwrite.h */, + FBBC536D188ACA99004D2BA0 /* lfg.h */, + FBBC536E188ACA99004D2BA0 /* log.h */, + FBBC536F188ACA99004D2BA0 /* lzo.h */, + FBBC5370188ACA99004D2BA0 /* mathematics.h */, + FBBC5371188ACA99004D2BA0 /* md5.h */, + FBBC5372188ACA99004D2BA0 /* mem.h */, + FBBC5373188ACA99004D2BA0 /* murmur3.h */, + FBBC5374188ACA99004D2BA0 /* old_pix_fmts.h */, + FBBC5375188ACA99004D2BA0 /* opt.h */, + FBBC5376188ACA99004D2BA0 /* parseutils.h */, + FBBC5377188ACA99004D2BA0 /* pixdesc.h */, + FBBC5378188ACA99004D2BA0 /* pixfmt.h */, + FBBC5379188ACA99004D2BA0 /* random_seed.h */, + FBBC537A188ACA99004D2BA0 /* rational.h */, + FBBC537B188ACA99004D2BA0 /* ripemd.h */, + FBBC537C188ACA99004D2BA0 /* samplefmt.h */, + FBBC537D188ACA99004D2BA0 /* sha.h */, + FBBC537E188ACA99004D2BA0 /* sha512.h */, + FBBC537F188ACA99004D2BA0 /* time.h */, + FBBC5380188ACA99004D2BA0 /* timecode.h */, + FBBC5381188ACA99004D2BA0 /* timestamp.h */, + FBBC5382188ACA99004D2BA0 /* version.h */, + FBBC5383188ACA99004D2BA0 /* xtea.h */, + ); + path = libavutil; + sourceTree = ""; + }; + FBBC5384188ACA99004D2BA0 /* libswresample */ = { + isa = PBXGroup; + children = ( + FBBC5385188ACA99004D2BA0 /* swresample.h */, + FBBC5386188ACA99004D2BA0 /* version.h */, + ); + path = libswresample; + sourceTree = ""; + }; + FBBC5387188ACA99004D2BA0 /* libswscale */ = { + isa = PBXGroup; + children = ( + FBBC5388188ACA99004D2BA0 /* swscale.h */, + FBBC5389188ACA99004D2BA0 /* version.h */, + ); + path = libswscale; + sourceTree = ""; + }; + FBBC538A188ACA99004D2BA0 /* lib */ = { + isa = PBXGroup; + children = ( + FBBC538B188ACA99004D2BA0 /* libavcodec.a */, + FBBC538C188ACA99004D2BA0 /* libavdevice.a */, + FBBC538D188ACA99004D2BA0 /* libavfilter.a */, + FBBC538E188ACA99004D2BA0 /* libavformat.a */, + FBBC538F188ACA99004D2BA0 /* libavresample.a */, + FBBC5390188ACA99004D2BA0 /* libavutil.a */, + FBBC5391188ACA99004D2BA0 /* libswresample.a */, + FBBC5392188ACA99004D2BA0 /* libswscale.a */, + FBBC5393188ACA99004D2BA0 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FBBC5393188ACA99004D2BA0 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FBBC5394188ACA99004D2BA0 /* libavcodec.pc */, + FBBC5395188ACA99004D2BA0 /* libavdevice.pc */, + FBBC5396188ACA99004D2BA0 /* libavfilter.pc */, + FBBC5397188ACA99004D2BA0 /* libavformat.pc */, + 
FBBC5398188ACA99004D2BA0 /* libavresample.pc */, + FBBC5399188ACA99004D2BA0 /* libavutil.pc */, + FBBC539A188ACA99004D2BA0 /* libswresample.pc */, + FBBC539B188ACA99004D2BA0 /* libswscale.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FBBC53C8188BA7AF004D2BA0 /* ffmpeg */ = { + isa = PBXGroup; + children = ( + FB27273C188A8BB20093CCC9 /* armv7 */, + FB2727A7188A8BB20093CCC9 /* armv7s */, + FB272812188A8BB20093CCC9 /* i386 */, + FB27287D188A8BB20093CCC9 /* universal */, + ); + name = ffmpeg; + sourceTree = ""; + }; + FBBC53CA188BA7C7004D2BA0 /* opus */ = { + isa = PBXGroup; + children = ( + FBBC53CB188BA7C7004D2BA0 /* dist-armv7 */, + FBBC53D7188BA7C7004D2BA0 /* dist-armv7s */, + ); + path = opus; + sourceTree = ""; + }; + FBBC53CB188BA7C7004D2BA0 /* dist-armv7 */ = { + isa = PBXGroup; + children = ( + FBBC53CC188BA7C7004D2BA0 /* include */, + FBBC53D2188BA7C7004D2BA0 /* lib */, + ); + path = "dist-armv7"; + sourceTree = ""; + }; + FBBC53CC188BA7C7004D2BA0 /* include */ = { + isa = PBXGroup; + children = ( + FBBC53CD188BA7C7004D2BA0 /* opus */, + ); + path = include; + sourceTree = ""; + }; + FBBC53CD188BA7C7004D2BA0 /* opus */ = { + isa = PBXGroup; + children = ( + FBBC53CE188BA7C7004D2BA0 /* opus.h */, + FBBC53CF188BA7C7004D2BA0 /* opus_defines.h */, + FBBC53D0188BA7C7004D2BA0 /* opus_multistream.h */, + FBBC53D1188BA7C7004D2BA0 /* opus_types.h */, + ); + path = opus; + sourceTree = ""; + }; + FBBC53D2188BA7C7004D2BA0 /* lib */ = { + isa = PBXGroup; + children = ( + FBBC53D3188BA7C7004D2BA0 /* libopus.a */, + FBBC53D4188BA7C7004D2BA0 /* libopus.la */, + FBBC53D5188BA7C7004D2BA0 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FBBC53D5188BA7C7004D2BA0 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FBBC53D6188BA7C7004D2BA0 /* opus.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FBBC53D7188BA7C7004D2BA0 /* dist-armv7s */ = { + isa = PBXGroup; + children = ( + FBBC53D8188BA7C7004D2BA0 /* include */, + FBBC53DE188BA7C7004D2BA0 /* lib */, + ); + path = "dist-armv7s"; + sourceTree = ""; + }; + FBBC53D8188BA7C7004D2BA0 /* include */ = { + isa = PBXGroup; + children = ( + FBBC53D9188BA7C7004D2BA0 /* opus */, + ); + path = include; + sourceTree = ""; + }; + FBBC53D9188BA7C7004D2BA0 /* opus */ = { + isa = PBXGroup; + children = ( + FBBC53DA188BA7C7004D2BA0 /* opus.h */, + FBBC53DB188BA7C7004D2BA0 /* opus_defines.h */, + FBBC53DC188BA7C7004D2BA0 /* opus_multistream.h */, + FBBC53DD188BA7C7004D2BA0 /* opus_types.h */, + ); + path = opus; + sourceTree = ""; + }; + FBBC53DE188BA7C7004D2BA0 /* lib */ = { + isa = PBXGroup; + children = ( + FBBC53DF188BA7C7004D2BA0 /* libopus.a */, + FBBC53E0188BA7C7004D2BA0 /* libopus.la */, + FBBC53E1188BA7C7004D2BA0 /* pkgconfig */, + ); + path = lib; + sourceTree = ""; + }; + FBBC53E1188BA7C7004D2BA0 /* pkgconfig */ = { + isa = PBXGroup; + children = ( + FBBC53E2188BA7C7004D2BA0 /* opus.pc */, + ); + path = pkgconfig; + sourceTree = ""; + }; + FBC18B2C188A4DC900D5D34E /* Video */ = { + isa = PBXGroup; + children = ( + FBC18B2D188A4E0500D5D34E /* VideoDepacketizer.h */, + FBC18B2E188A4E0500D5D34E /* VideoDepacketizer.m */, + FBC18B30188A5A1100D5D34E /* VideoDecoder.h */, + FBC18B31188A5A1100D5D34E /* VideoDecoder.m */, + FB4E945E188BB80700CCC583 /* Connection.h */, + FB4E945F188BB80700CCC583 /* Connection.m */, + FB4E9489188BCC2900CCC583 /* VideoRenderer.h */, + FB4E948A188BCC2900CCC583 /* VideoRenderer.m */, + FBF6AE7D188A274100B50578 /* Supporting Files */, + ); + name = Video; + sourceTree = ""; + }; + 
FBF6AE68188A274100B50578 = { + isa = PBXGroup; + children = ( + FBF6AE7C188A274100B50578 /* Limelight-iOS */, + FBF6AE99188A274100B50578 /* Limelight-iOSTests */, + FBF6AE73188A274100B50578 /* Frameworks */, + FBF6AE72188A274100B50578 /* Products */, + ); + sourceTree = ""; + }; + FBF6AE72188A274100B50578 /* Products */ = { + isa = PBXGroup; + children = ( + FBF6AE71188A274100B50578 /* Limelight-iOS.app */, + FBF6AE91188A274100B50578 /* Limelight-iOSTests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + FBF6AE73188A274100B50578 /* Frameworks */ = { + isa = PBXGroup; + children = ( + FB05E4C31899EC4100E0CAC7 /* libxml2.2.dylib */, + FB05E4C11899E81500E0CAC7 /* libxml2.dylib */, + FB05E4BF1899730B00E0CAC7 /* AdSupport.framework */, + FB4E9485188BC39600CCC583 /* Debug-iphoneos */, + FB4E9461188BC09600CCC583 /* liblimelight-common.a */, + FB4E945C188BB6FA00CCC583 /* liblimelight-common.a */, + FBBC53E9188BA7FC004D2BA0 /* libavcodec.a */, + FBBC53EA188BA7FC004D2BA0 /* libavdevice.a */, + FBBC53EB188BA7FC004D2BA0 /* libavfilter.a */, + FBBC53EC188BA7FC004D2BA0 /* libavformat.a */, + FBBC53ED188BA7FC004D2BA0 /* libavresample.a */, + FBBC53EE188BA7FC004D2BA0 /* libavutil.a */, + FBBC53EF188BA7FC004D2BA0 /* libswresample.a */, + FBBC53F0188BA7FC004D2BA0 /* libswscale.a */, + FBBC53A4188ACD01004D2BA0 /* libz.dylib */, + FBBC5331188ACA99004D2BA0 /* armv7s */, + FBBC52C6188ACA92004D2BA0 /* armv7s */, + FB272917188A93360093CCC9 /* Limelight-iOS */, + FB27272A188A8B3C0093CCC9 /* libavcodec.a */, + FB27272B188A8B3C0093CCC9 /* libavdevice.a */, + FB27272C188A8B3C0093CCC9 /* libavfilter.a */, + FB27272D188A8B3C0093CCC9 /* libavformat.a */, + FB27272E188A8B3C0093CCC9 /* libavresample.a */, + FB27272F188A8B3C0093CCC9 /* libavutil.a */, + FB272730188A8B3C0093CCC9 /* libswresample.a */, + FB272731188A8B3C0093CCC9 /* libswscale.a */, + FB272728188A8A000093CCC9 /* libavcodec.a */, + FBF6AE74188A274100B50578 /* Foundation.framework */, + FBF6AE76188A274100B50578 /* CoreGraphics.framework */, + FBF6AE78188A274100B50578 /* UIKit.framework */, + FBF6AE7A188A274100B50578 /* CoreData.framework */, + FBF6AE92188A274100B50578 /* XCTest.framework */, + ); + name = Frameworks; + sourceTree = ""; + }; + FBF6AE7C188A274100B50578 /* Limelight-iOS */ = { + isa = PBXGroup; + children = ( + FB27273B188A8BB20093CCC9 /* libs */, + FBC18B2C188A4DC900D5D34E /* Video */, + FBC18AE2188A2AB500D5D34E /* MainFrame.storyboard */, + FBF6AE85188A274100B50578 /* AppDelegate.h */, + FBF6AE86188A274100B50578 /* AppDelegate.m */, + FBF6AE8B188A274100B50578 /* Images.xcassets */, + FBF6AE88188A274100B50578 /* Limelight_iOS.xcdatamodeld */, + FBC18B29188A3B9100D5D34E /* MainFrameViewController.h */, + FBC18B2A188A3B9100D5D34E /* MainFrameViewController.m */, + FBBC53A6188AD1D3004D2BA0 /* StreamFrameViewController.h */, + FBBC53A7188AD1D3004D2BA0 /* StreamFrameViewController.m */, + FBBC53AC188AE27D004D2BA0 /* StreamView.h */, + FBBC53AD188AE27D004D2BA0 /* StreamView.m */, + FB05E4BC1896D83700E0CAC7 /* ConnectionHandler.h */, + FB05E4BD1896D83700E0CAC7 /* ConnectionHandler.m */, + ); + path = "Limelight-iOS"; + sourceTree = ""; + }; + FBF6AE7D188A274100B50578 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + FBF6AE7E188A274100B50578 /* Limelight-iOS-Info.plist */, + FBF6AE7F188A274100B50578 /* InfoPlist.strings */, + FBF6AE82188A274100B50578 /* main.m */, + FBF6AE84188A274100B50578 /* Limelight-iOS-Prefix.pch */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + FBF6AE99188A274100B50578 /* Limelight-iOSTests */ = 
{ + isa = PBXGroup; + children = ( + FBF6AE9F188A274100B50578 /* Limelight_iOSTests.m */, + FBF6AE9A188A274100B50578 /* Supporting Files */, + ); + path = "Limelight-iOSTests"; + sourceTree = ""; + }; + FBF6AE9A188A274100B50578 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + FBF6AE9B188A274100B50578 /* Limelight-iOSTests-Info.plist */, + FBF6AE9C188A274100B50578 /* InfoPlist.strings */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + FBF6AE70188A274100B50578 /* Limelight-iOS */ = { + isa = PBXNativeTarget; + buildConfigurationList = FBF6AEA3188A274100B50578 /* Build configuration list for PBXNativeTarget "Limelight-iOS" */; + buildPhases = ( + FBF6AE6D188A274100B50578 /* Sources */, + FBF6AE6F188A274100B50578 /* Resources */, + FBF6AE6E188A274100B50578 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "Limelight-iOS"; + productName = "Limelight-iOS"; + productReference = FBF6AE71188A274100B50578 /* Limelight-iOS.app */; + productType = "com.apple.product-type.application"; + }; + FBF6AE90188A274100B50578 /* Limelight-iOSTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = FBF6AEA6188A274100B50578 /* Build configuration list for PBXNativeTarget "Limelight-iOSTests" */; + buildPhases = ( + FBF6AE8D188A274100B50578 /* Sources */, + FBF6AE8E188A274100B50578 /* Frameworks */, + FBF6AE8F188A274100B50578 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + FBF6AE98188A274100B50578 /* PBXTargetDependency */, + ); + name = "Limelight-iOSTests"; + productName = "Limelight-iOSTests"; + productReference = FBF6AE91188A274100B50578 /* Limelight-iOSTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + FBF6AE69188A274100B50578 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0500; + ORGANIZATIONNAME = "Diego Waxemberg"; + TargetAttributes = { + FBF6AE70188A274100B50578 = { + DevelopmentTeam = DM46QST4M7; + }; + FBF6AE90188A274100B50578 = { + TestTargetID = FBF6AE70188A274100B50578; + }; + }; + }; + buildConfigurationList = FBF6AE6C188A274100B50578 /* Build configuration list for PBXProject "Limelight-iOS" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 0; + knownRegions = ( + en, + ); + mainGroup = FBF6AE68188A274100B50578; + productRefGroup = FBF6AE72188A274100B50578 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + FBF6AE70188A274100B50578 /* Limelight-iOS */, + FBF6AE90188A274100B50578 /* Limelight-iOSTests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + FBF6AE6F188A274100B50578 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FBC18AE3188A2AB500D5D34E /* MainFrame.storyboard in Resources */, + FBF6AE8C188A274100B50578 /* Images.xcassets in Resources */, + FBBC53E7188BA7C7004D2BA0 /* libopus.la in Resources */, + FBBC53E5188BA7C7004D2BA0 /* opus.pc in Resources */, + FBBC53E4188BA7C7004D2BA0 /* libopus.la in Resources */, + FBBC53E8188BA7C7004D2BA0 /* opus.pc in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FBF6AE8F188A274100B50578 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FBF6AE9E188A274100B50578 /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 
+/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + FBF6AE6D188A274100B50578 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FB4E948B188BCC2900CCC583 /* VideoRenderer.m in Sources */, + FB05E4BE1896D83700E0CAC7 /* ConnectionHandler.m in Sources */, + FBC18B2F188A4E0500D5D34E /* VideoDepacketizer.m in Sources */, + FBF6AE87188A274100B50578 /* AppDelegate.m in Sources */, + FBBC53A8188AD1D3004D2BA0 /* StreamFrameViewController.m in Sources */, + FBC18B32188A5A1100D5D34E /* VideoDecoder.m in Sources */, + FBF6AE8A188A274100B50578 /* Limelight_iOS.xcdatamodeld in Sources */, + FBF6AE83188A274100B50578 /* main.m in Sources */, + FBBC53AE188AE27D004D2BA0 /* StreamView.m in Sources */, + FBC18B2B188A3B9100D5D34E /* MainFrameViewController.m in Sources */, + FB4E9460188BB80700CCC583 /* Connection.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + FBF6AE8D188A274100B50578 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FBF6AEA0188A274100B50578 /* Limelight_iOSTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + FBF6AE98188A274100B50578 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = FBF6AE70188A274100B50578 /* Limelight-iOS */; + targetProxy = FBF6AE97188A274100B50578 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + FB27291A188A93360093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB27291B188A93360093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272AD7188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272AD8188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272ADE188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272ADF188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272AE5188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272AE6188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272AEC188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272AED188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272AF3188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272AF4188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272AFA188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272AFB188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B01188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B02188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B08188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B09188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B0F188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B10188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B16188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + 
FB272B17188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B1D188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B1E188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B24188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B25188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B2B188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B2C188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B32188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B33188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B39188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B3A188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B40188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B41188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B47188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B48188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B4E188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B4F188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B55188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B56188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B5C188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B5D188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B63188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B64188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B6A188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B6B188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B71188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B72188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B78188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B79188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B7F188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B80188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B86188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B87188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B8D188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B8E188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B94188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272B95188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272B9B188A93370093CCC9 /* InfoPlist.strings */ = { + isa 
= PBXVariantGroup; + children = ( + FB272B9C188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272BA2188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272BA3188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272BA9188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272BAA188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272BB0188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272BB1188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FB272BB7188A93370093CCC9 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FB272BB8188A93370093CCC9 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FBF6AE7F188A274100B50578 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FBF6AE80188A274100B50578 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + FBF6AE9C188A274100B50578 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + FBF6AE9D188A274100B50578 /* en */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + FBF6AEA1188A274100B50578 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD)"; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + DSTROOT = "/tmp/$(PROJECT_NAME).dst"; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_OPTIMIZATION_LEVEL = fast; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + ONLY_ACTIVE_ARCH = NO; + PRODUCT_NAME = Limelight; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALID_ARCHS = armv7; + }; + name = Debug; + }; + FBF6AEA2188A274100B50578 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD)"; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = YES; + DSTROOT = "/tmp/$(PROJECT_NAME).dst"; + ENABLE_NS_ASSERTIONS = NO; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_OPTIMIZATION_LEVEL = fast; + 
GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 7.0; + PRODUCT_NAME = Limelight; + SDKROOT = iphoneos; + TARGETED_DEVICE_FAMILY = "1,2"; + VALIDATE_PRODUCT = YES; + VALID_ARCHS = armv7; + }; + name = Release; + }; + FBF6AEA4188A274100B50578 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; + CODE_SIGN_IDENTITY = "iPhone Developer"; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Limelight-iOS/Limelight-iOS-Prefix.pch"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include, + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/ffmpeg/armv7/include/**", + "/Users/diegowaxemberg/Documents/Repositories/limelight-common-c/limelight-common", + /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS7.0.sdk/usr/include/libxml2, + ); + INFOPLIST_FILE = "Limelight-iOS/Limelight-iOS-Info.plist"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/ffmpeg/armv7/lib", + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/opus/dist-armv7/lib", + "/Users/diegowaxemberg/Library/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos", + "$(USER_LIBRARY_DIR)/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos", + "$(USER_LIBRARY_DIR)/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Intermediates/limelight-common-c.build/Debug-iphoneos/limelight-common.build/Objects-normal/armv7", + ); + PRODUCT_NAME = "$(TARGET_NAME)"; + WRAPPER_EXTENSION = app; + }; + name = Debug; + }; + FBF6AEA5188A274100B50578 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage; + CODE_SIGN_IDENTITY = "iPhone Developer"; + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Limelight-iOS/Limelight-iOS-Prefix.pch"; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include, + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/ffmpeg/armv7/include/**", + "/Users/diegowaxemberg/Documents/Repositories/limelight-common-c/limelight-common", + /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS7.0.sdk/usr/include/libxml2, + ); + INFOPLIST_FILE = "Limelight-iOS/Limelight-iOS-Info.plist"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/ffmpeg/armv7/lib", + "/Users/diegowaxemberg/Documents/Repositories/Limelight-iOS/Limelight-iOS/libs/opus/dist-armv7/lib", + "/Users/diegowaxemberg/Library/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos", + "$(USER_LIBRARY_DIR)/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Products/Debug-iphoneos", + 
"$(USER_LIBRARY_DIR)/Developer/Xcode/DerivedData/limelight-common-c-cfonfottpvhxxcgrusjsydwkdaco/Build/Intermediates/limelight-common-c.build/Debug-iphoneos/limelight-common.build/Objects-normal/armv7", + ); + PRODUCT_NAME = "$(TARGET_NAME)"; + WRAPPER_EXTENSION = app; + }; + name = Release; + }; + FBF6AEA7188A274100B50578 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/Limelight-iOS.app/Limelight-iOS"; + FRAMEWORK_SEARCH_PATHS = ( + "$(SDKROOT)/Developer/Library/Frameworks", + "$(inherited)", + "$(DEVELOPER_FRAMEWORKS_DIR)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Limelight-iOS/Limelight-iOS-Prefix.pch"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + INFOPLIST_FILE = "Limelight-iOSTests/Limelight-iOSTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Debug; + }; + FBF6AEA8188A274100B50578 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_INCLUDING_64_BIT)"; + BUNDLE_LOADER = "$(BUILT_PRODUCTS_DIR)/Limelight-iOS.app/Limelight-iOS"; + FRAMEWORK_SEARCH_PATHS = ( + "$(SDKROOT)/Developer/Library/Frameworks", + "$(inherited)", + "$(DEVELOPER_FRAMEWORKS_DIR)", + ); + GCC_PRECOMPILE_PREFIX_HEADER = YES; + GCC_PREFIX_HEADER = "Limelight-iOS/Limelight-iOS-Prefix.pch"; + INFOPLIST_FILE = "Limelight-iOSTests/Limelight-iOSTests-Info.plist"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUNDLE_LOADER)"; + WRAPPER_EXTENSION = xctest; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + FBF6AE6C188A274100B50578 /* Build configuration list for PBXProject "Limelight-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FBF6AEA1188A274100B50578 /* Debug */, + FBF6AEA2188A274100B50578 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FBF6AEA3188A274100B50578 /* Build configuration list for PBXNativeTarget "Limelight-iOS" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FBF6AEA4188A274100B50578 /* Debug */, + FBF6AEA5188A274100B50578 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + FBF6AEA6188A274100B50578 /* Build configuration list for PBXNativeTarget "Limelight-iOSTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + FBF6AEA7188A274100B50578 /* Debug */, + FBF6AEA8188A274100B50578 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + +/* Begin XCVersionGroup section */ + FB272BBC188A93370093CCC9 /* Limelight_iOS.xcdatamodeld */ = { + isa = XCVersionGroup; + children = ( + FB272BBD188A93370093CCC9 /* Limelight_iOS.xcdatamodel */, + ); + currentVersion = FB272BBD188A93370093CCC9 /* Limelight_iOS.xcdatamodel */; + path = Limelight_iOS.xcdatamodeld; + sourceTree = ""; + versionGroupType = wrapper.xcdatamodel; + }; + FBF6AE88188A274100B50578 /* Limelight_iOS.xcdatamodeld */ = { + isa = XCVersionGroup; + children = ( + FBF6AE89188A274100B50578 /* Limelight_iOS.xcdatamodel */, + ); + currentVersion = FBF6AE89188A274100B50578 /* Limelight_iOS.xcdatamodel */; + path = Limelight_iOS.xcdatamodeld; + sourceTree = ""; + versionGroupType = wrapper.xcdatamodel; + }; +/* End XCVersionGroup section */ + }; + rootObject = 
FBF6AE69188A274100B50578 /* Project object */; +} diff --git a/Limelight-iOS.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Limelight-iOS.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..0311388 --- /dev/null +++ b/Limelight-iOS.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/Limelight-iOS.xcodeproj/project.xcworkspace/xcshareddata/Limelight-iOS.xccheckout b/Limelight-iOS.xcodeproj/project.xcworkspace/xcshareddata/Limelight-iOS.xccheckout new file mode 100644 index 0000000..0f0c92f --- /dev/null +++ b/Limelight-iOS.xcodeproj/project.xcworkspace/xcshareddata/Limelight-iOS.xccheckout @@ -0,0 +1,41 @@ + + + + + IDESourceControlProjectFavoriteDictionaryKey + + IDESourceControlProjectIdentifier + EF82C59B-751E-49DC-BD4F-CAB151CFCF08 + IDESourceControlProjectName + Limelight-iOS + IDESourceControlProjectOriginsDictionary + + C8722A1B-5422-4080-8DCF-F008E289FAE0 + ssh://github.com/limelight-stream/limelight-ios.git + + IDESourceControlProjectPath + Limelight-iOS.xcodeproj/project.xcworkspace + IDESourceControlProjectRelativeInstallPathDictionary + + C8722A1B-5422-4080-8DCF-F008E289FAE0 + ../.. + + IDESourceControlProjectURL + ssh://github.com/limelight-stream/limelight-ios.git + IDESourceControlProjectVersion + 110 + IDESourceControlProjectWCCIdentifier + C8722A1B-5422-4080-8DCF-F008E289FAE0 + IDESourceControlProjectWCConfigurations + + + IDESourceControlRepositoryExtensionIdentifierKey + public.vcs.git + IDESourceControlWCCIdentifierKey + C8722A1B-5422-4080-8DCF-F008E289FAE0 + IDESourceControlWCCName + Limelight-iOS + + + + diff --git a/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist new file mode 100644 index 0000000..f13ab50 --- /dev/null +++ b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcdebugger/Breakpoints_v2.xcbkptlist @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + + + + diff --git a/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/Limelight-iOS.xcscheme b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/Limelight-iOS.xcscheme new file mode 100644 index 0000000..b70b19a --- /dev/null +++ b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/Limelight-iOS.xcscheme @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/xcschememanagement.plist b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/xcschememanagement.plist new file mode 100644 index 0000000..5b67c0c --- /dev/null +++ b/Limelight-iOS.xcodeproj/xcuserdata/diegowaxemberg.xcuserdatad/xcschemes/xcschememanagement.plist @@ -0,0 +1,27 @@ + + + + + SchemeUserState + + Limelight-iOS.xcscheme + + orderHint + 0 + + + SuppressBuildableAutocreation + + FBF6AE70188A274100B50578 + + primary + + + FBF6AE90188A274100B50578 + + primary + + + + + diff --git a/Limelight-iOS/AppDelegate.h b/Limelight-iOS/AppDelegate.h new file mode 100644 index 0000000..1b744af --- /dev/null +++ b/Limelight-iOS/AppDelegate.h @@ -0,0 +1,23 @@ +// +// AppDelegate.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
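// (A brief orientation note, not part of the original header: the
// +getMainOpQueue class method declared below exposes the single shared
// NSOperationQueue that the rest of this diff uses for background work,
// e.g. in ConnectionHandler.m:
//
//     [[AppDelegate getMainOpQueue] addOperation:pairHandler];
// )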
+// + +#import + +@interface AppDelegate : UIResponder + +@property (strong, nonatomic) UIWindow *window; + +@property (readonly, strong, nonatomic) NSManagedObjectContext *managedObjectContext; +@property (readonly, strong, nonatomic) NSManagedObjectModel *managedObjectModel; +@property (readonly, strong, nonatomic) NSPersistentStoreCoordinator *persistentStoreCoordinator; + +- (void)saveContext; +- (NSURL *)applicationDocumentsDirectory; ++ (NSOperationQueue*) getMainOpQueue; + +@end diff --git a/Limelight-iOS/AppDelegate.m b/Limelight-iOS/AppDelegate.m new file mode 100644 index 0000000..d672ad8 --- /dev/null +++ b/Limelight-iOS/AppDelegate.m @@ -0,0 +1,153 @@ +// +// AppDelegate.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "AppDelegate.h" + +@implementation AppDelegate + +@synthesize managedObjectContext = _managedObjectContext; +@synthesize managedObjectModel = _managedObjectModel; +@synthesize persistentStoreCoordinator = _persistentStoreCoordinator; + +static NSOperationQueue* mainQueue; + +- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions +{ + mainQueue = [[NSOperationQueue alloc]init]; + return YES; +} + ++ (NSOperationQueue*) getMainOpQueue +{ + return mainQueue; +} + +- (void)applicationWillResignActive:(UIApplication *)application +{ + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game. +} + +- (void)applicationDidEnterBackground:(UIApplication *)application +{ + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. +} + +- (void)applicationWillEnterForeground:(UIApplication *)application +{ + // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background. +} + +- (void)applicationDidBecomeActive:(UIApplication *)application +{ + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. +} + +- (void)applicationWillTerminate:(UIApplication *)application +{ + // Saves changes in the application's managed object context before the application terminates. + [self saveContext]; +} + +- (void)saveContext +{ + NSError *error = nil; + NSManagedObjectContext *managedObjectContext = self.managedObjectContext; + if (managedObjectContext != nil) { + if ([managedObjectContext hasChanges] && ![managedObjectContext save:&error]) { + // Replace this implementation with code to handle the error appropriately. + // abort() causes the application to generate a crash log and terminate. You should not use this function in a shipping application, although it may be useful during development. 
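// A hedged sketch of the "handle the error appropriately" replacement that the
// template comment above asks for (not part of the original diff): log the
// failure and recover (retry later, discard changes) instead of calling
// abort() in a shipping build.
//
// NSLog(@"Core Data save failed: %@, %@", error, [error userInfo]);
// // ...then recover rather than abort()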
+ NSLog(@"Unresolved error %@, %@", error, [error userInfo]); + abort(); + } + } +} + +#pragma mark - Core Data stack + +// Returns the managed object context for the application. +// If the context doesn't already exist, it is created and bound to the persistent store coordinator for the application. +- (NSManagedObjectContext *)managedObjectContext +{ + if (_managedObjectContext != nil) { + return _managedObjectContext; + } + + NSPersistentStoreCoordinator *coordinator = [self persistentStoreCoordinator]; + if (coordinator != nil) { + _managedObjectContext = [[NSManagedObjectContext alloc] init]; + [_managedObjectContext setPersistentStoreCoordinator:coordinator]; + } + return _managedObjectContext; +} + +// Returns the managed object model for the application. +// If the model doesn't already exist, it is created from the application's model. +- (NSManagedObjectModel *)managedObjectModel +{ + if (_managedObjectModel != nil) { + return _managedObjectModel; + } + NSURL *modelURL = [[NSBundle mainBundle] URLForResource:@"Limelight_iOS" withExtension:@"momd"]; + _managedObjectModel = [[NSManagedObjectModel alloc] initWithContentsOfURL:modelURL]; + return _managedObjectModel; +} + +// Returns the persistent store coordinator for the application. +// If the coordinator doesn't already exist, it is created and the application's store added to it. +- (NSPersistentStoreCoordinator *)persistentStoreCoordinator +{ + if (_persistentStoreCoordinator != nil) { + return _persistentStoreCoordinator; + } + + NSURL *storeURL = [[self applicationDocumentsDirectory] URLByAppendingPathComponent:@"Limelight_iOS.sqlite"]; + + NSError *error = nil; + _persistentStoreCoordinator = [[NSPersistentStoreCoordinator alloc] initWithManagedObjectModel:[self managedObjectModel]]; + if (![_persistentStoreCoordinator addPersistentStoreWithType:NSSQLiteStoreType configuration:nil URL:storeURL options:nil error:&error]) { + /* + Replace this implementation with code to handle the error appropriately. + + abort() causes the application to generate a crash log and terminate. You should not use this function in a shipping application, although it may be useful during development. + + Typical reasons for an error here include: + * The persistent store is not accessible; + * The schema for the persistent store is incompatible with current managed object model. + Check the error message to determine what the actual problem was. + + + If the persistent store is not accessible, there is typically something wrong with the file path. Often, a file URL is pointing into the application's resources directory instead of a writeable directory. + + If you encounter schema incompatibility errors during development, you can reduce their frequency by: + * Simply deleting the existing store: + [[NSFileManager defaultManager] removeItemAtURL:storeURL error:nil] + + * Performing automatic lightweight migration by passing the following dictionary as the options parameter: + @{NSMigratePersistentStoresAutomaticallyOption:@YES, NSInferMappingModelAutomaticallyOption:@YES} + + Lightweight migration will only work for a limited set of schema changes; consult "Core Data Model Versioning and Data Migration Programming Guide" for details. + + */ + NSLog(@"Unresolved error %@, %@", error, [error userInfo]); + abort(); + } + + return _persistentStoreCoordinator; +} + +#pragma mark - Application's Documents directory + +// Returns the URL to the application's Documents directory. 
+- (NSURL *)applicationDocumentsDirectory +{ + return [[[NSFileManager defaultManager] URLsForDirectory:NSDocumentDirectory inDomains:NSUserDomainMask] lastObject]; +} + +@end diff --git a/Limelight-iOS/Connection.h b/Limelight-iOS/Connection.h new file mode 100644 index 0000000..0d9bd73 --- /dev/null +++ b/Limelight-iOS/Connection.h @@ -0,0 +1,16 @@ +// +// Connection.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/19/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import + +@interface Connection : NSOperation + +-(id) initWithHost:(int)ipaddr width:(int)width height:(int)height; +-(void) main; + +@end diff --git a/Limelight-iOS/Connection.m b/Limelight-iOS/Connection.m new file mode 100644 index 0000000..959a131 --- /dev/null +++ b/Limelight-iOS/Connection.m @@ -0,0 +1,92 @@ +// +// Connection.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/19/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "Connection.h" + +#include + +#include "VideoDecoder.h" +#include "VideoRenderer.h" + +@implementation Connection { + IP_ADDRESS host; + STREAM_CONFIGURATION streamConfig; + DECODER_RENDERER_CALLBACKS callbacks; +} + +void setup(int width, int height, int fps, void* context, int drFlags) +{ + printf("Setup video\n"); + nv_avc_init(width, height, DISABLE_LOOP_FILTER | FAST_DECODE | FAST_BILINEAR_FILTERING, 2); +} + +void submitDecodeUnit(PDECODE_UNIT decodeUnit) +{ + unsigned char* data = (unsigned char*) malloc(decodeUnit->fullLength + nv_avc_get_input_padding_size()); + if (data != NULL) { + int offset = 0; + int err; + + PLENTRY entry = decodeUnit->bufferList; + while (entry != NULL) { + memcpy(&data[offset], entry->data, entry->length); + offset += entry->length; + entry = entry->next; + } + + err = nv_avc_decode(data, decodeUnit->fullLength); + if (err != 0) { + printf("Decode failed: %d\n", err); + } + + free(data); + } +} + +void start(void) +{ + printf("Start video\n"); + [VideoRenderer startRendering]; +} + +void stop(void) +{ + printf("Stop video\n"); + [VideoRenderer stopRendering]; +} + +void release(void) +{ + printf("Release video\n"); + nv_avc_destroy(); +} + +-(id) initWithHost:(int)ipaddr width:(int)width height:(int)height +{ + self = [super init]; + host = ipaddr; + + streamConfig.width = width; + streamConfig.height = height; + streamConfig.fps = 30; + + callbacks.setup = setup; + callbacks.start = start; + callbacks.stop = stop; + callbacks.release = release; + callbacks.submitDecodeUnit = submitDecodeUnit; + + return self; +} + +-(void) main +{ + LiStartConnection(host, &streamConfig, &callbacks, NULL, 0); +} + +@end diff --git a/Limelight-iOS/ConnectionHandler.h b/Limelight-iOS/ConnectionHandler.h new file mode 100644 index 0000000..f590e35 --- /dev/null +++ b/Limelight-iOS/ConnectionHandler.h @@ -0,0 +1,19 @@ +// +// ConnectionHandler.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/27/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
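// A hedged sketch for -getXMLBufferFromData: in ConnectionHandler.m below (not
// part of the original diff): the version in this diff returns the UTF8String
// pointer of a temporary NSString, which is never freed by its callers and is
// not guaranteed to outlive the current autorelease scope. Returning a
// malloc'd copy that the caller frees after xmlParseMemory avoids both issues
// (assumes <string.h> is available for strdup):
//
// - (void*) getXMLBufferFromData:(NSData*)data
// {
//     if (data == nil) return NULL;
//     NSString* xml = [[NSString alloc] initWithData:data encoding:NSUTF8StringEncoding];
//     xml = [xml stringByReplacingOccurrencesOfString:@"UTF-16" withString:@"UTF-8"
//                                             options:NSCaseInsensitiveSearch
//                                               range:NSMakeRange(0, [xml length])];
//     return xml ? strdup([xml UTF8String]) : NULL;
// }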
+// + +#import + +@interface ConnectionHandler : NSOperation +@property NSString* hostName; +@property int mode; + ++ (void) pairWithHost:(NSString*)host; ++ (void) streamWithHost:(NSString*)host viewController:(UIViewController*)viewCont; ++ (NSString*) resolveHost:(NSString*)host; + +@end diff --git a/Limelight-iOS/ConnectionHandler.m b/Limelight-iOS/ConnectionHandler.m new file mode 100644 index 0000000..3d5b4a1 --- /dev/null +++ b/Limelight-iOS/ConnectionHandler.m @@ -0,0 +1,335 @@ +// +// ConnectionHandler.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/27/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "ConnectionHandler.h" +#import "AppDelegate.h" +#import +#import "MainFrameViewController.h" + +#include +#include +#include +#include +#include + +@implementation ConnectionHandler +static const int PAIR_MODE = 0x1; +static const int STREAM_MODE = 0x2; + +static MainFrameViewController* viewCont; + ++ (void)streamWithHost:(NSString *)host viewController:(MainFrameViewController *)viewController +{ + viewCont = viewController; + ConnectionHandler* streamHandler = [[ConnectionHandler alloc] initForStream:host]; + [[AppDelegate getMainOpQueue] addOperation:streamHandler]; +} + ++ (void)pairWithHost:(NSString *)host +{ + ConnectionHandler* pairHandler = [[ConnectionHandler alloc] initForPair:host]; + [[AppDelegate getMainOpQueue]addOperation:pairHandler]; +} + ++ (NSString*) resolveHost:(NSString*)host +{ + struct hostent *hostent; + + if (inet_addr([host UTF8String]) != INADDR_NONE) + { + // Already an IP address + return host; + } + else + { + hostent = gethostbyname([host UTF8String]); + if (hostent != NULL) + { + char* ipstr = inet_ntoa(*(struct in_addr*)hostent->h_addr_list[0]); + NSLog(@"Resolved %@ -> %s", host, ipstr); + return [NSString stringWithUTF8String:ipstr]; + } + else + { + NSLog(@"Failed to resolve host: %d", h_errno); + return nil; + } + } +} + ++ (NSString*) getId +{ + // we need to generate some unique id + NSUUID* uniqueId = [ASIdentifierManager sharedManager].advertisingIdentifier; + NSString* idString = [NSString stringWithString:[uniqueId UUIDString]]; + + //use just the last 12 digits because GameStream truncates id's + return [idString substringFromIndex:24]; +} + ++ (NSString*) getName +{ + NSString* name = [UIDevice currentDevice].name; + NSLog(@"Device Name: %@", name); + + //sanitize name + name = [name stringByAddingPercentEscapesUsingEncoding:NSUTF8StringEncoding]; + return name; +} + +- (id) initForStream:(NSString*)host +{ + self = [super init]; + self.hostName = host; + self.mode = STREAM_MODE; + return self; +} + +- (id) initForPair:(NSString*)host +{ + self = [super init]; + self.hostName = host; + self.mode = PAIR_MODE; + return self; +} + +- (void) main +{ + switch (self.mode) + { + case STREAM_MODE: + { + bool pairState; + int err = [self pairState:&pairState]; + if (err) { + NSLog(@"ERROR: Pair state error: %d", err); + break; + } + + if (!pairState) + { + NSLog(@"WARN: Not paired with host!"); + UIAlertView* alert = [[UIAlertView alloc] initWithTitle:@"Not Paired" + message:@"This device is not paired with the host." 
+ delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + return; + } + + //TODO: parse this from http request + unsigned int appId = 67339056; + + NSData* applist = [self httpRequest:[NSString stringWithFormat:@"/applist?uniqueid=%@", [ConnectionHandler getId]]]; + + NSString* streamString = [NSString stringWithFormat:@"http://%@:47989/launch?uniqueid=%@&appid=%u", [ConnectionHandler resolveHost:self.hostName], [ConnectionHandler getId], appId]; + NSLog(@"host: %@", self.hostName); + + NSURL* url = [[NSURL alloc] initWithString:streamString]; + + NSMutableURLRequest* request = [[NSMutableURLRequest alloc] initWithURL:url]; + [request setHTTPMethod:@"GET"]; + + NSURLResponse* response = nil; + NSData *response1 = [NSURLConnection sendSynchronousRequest:request returningResponse:&response error:NULL]; + + + [viewCont performSelector:@selector(segueIntoStream) onThread:[NSThread mainThread] withObject:nil waitUntilDone:NO]; + break; + } + case PAIR_MODE: + { + NSLog(@"Trying to pair to %@...", self.hostName); + + bool pairState; + int err = [self pairState:&pairState]; + if (err) { + NSLog(@"ERROR: Pair state error: %d", err); + break; + } + + NSLog(@"Pair state: %i", pairState); + + if(pairState) + { + UIAlertView* alert = [[UIAlertView alloc]initWithTitle:@"Pairing" + message:[NSString stringWithFormat:@"Already paired to:\n %@", self.hostName] + delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + NSLog(@"Showing alertview"); + break; + } + + // this will hang until it pairs successfully or unsuccessfully + NSData* pairResponse = [self httpRequest:[NSString stringWithFormat:@"pair?uniqueid=%@&devicename=%@", [ConnectionHandler getId], [ConnectionHandler getName]]]; + + void* buffer = [self getXMLBufferFromData:pairResponse]; + if (buffer == NULL) { + NSLog(@"ERROR: Unable to allocate pair buffer."); + UIAlertView* alert = [[UIAlertView alloc] initWithTitle:@"Pairing" + message:[NSString stringWithFormat:@"Failed to pair with host:\n %@", self.hostName] + delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + break; + } + xmlDocPtr doc = xmlParseMemory(buffer, [pairResponse length]-1); + + if (doc == NULL) { + NSLog(@"ERROR: An error occured trying to parse pair response."); + break; + } + + xmlNodePtr node = xmlDocGetRootElement(doc); + node = node->xmlChildrenNode; + while (node != NULL) { + NSLog(@"Node: %s", node->name); + if (!xmlStrcmp(node->name, (const xmlChar*)"sessionid")) + { + xmlChar* sessionid = xmlNodeListGetString(doc, node->xmlChildrenNode, 1); + + if (xmlStrcmp(sessionid, (xmlChar*)"0")) { + UIAlertView* alert = [[UIAlertView alloc] initWithTitle:@"Pairing" + message:[NSString stringWithFormat:@"Successfully paired to host:\n %@", self.hostName] + delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + + } else { + UIAlertView* alert = [[UIAlertView alloc] initWithTitle:@"Pairing" + message:[NSString stringWithFormat:@"Pairing was declined by host:\n %@", self.hostName] + delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + } + + xmlFree(sessionid); + goto cleanup; + } + node = node->next; + } + + NSLog(@"ERROR: Could not find \"sessionid\" element in XML"); + + cleanup: + xmlFreeNode(node); + break; + } + default: + { + NSLog(@"Invalid connection mode specified!!!"); + } + } +} + +- (int) pairState: (bool*)paired +{ + int err; + NSData* pairData = 
[self httpRequest:[NSString stringWithFormat:@"pairstate?uniqueid=%@", [ConnectionHandler getId]]]; + + void* buffer = [self getXMLBufferFromData:pairData]; + + if (buffer == NULL) { + NSLog(@"ERROR: Unable to allocate pair state buffer."); + UIAlertView* alert = [[UIAlertView alloc] initWithTitle:@"Pairing" + message:[NSString stringWithFormat:@"Failed to get pair state with host:\n %@", self.hostName] + delegate:nil + cancelButtonTitle:@"Okay" + otherButtonTitles:nil, nil]; + [self showAlert:alert]; + return -1; + } + + xmlDocPtr doc = xmlParseMemory(buffer, [pairData length]-1); + + if (doc == NULL) { + NSLog(@"ERROR: An error occured trying to parse pair state."); + return -1; + } + + xmlNodePtr node = xmlDocGetRootElement(doc); + while (node != NULL) { + NSLog(@"Node: %s", node->name); + if (!xmlStrcmp(node->name, (const xmlChar*)"paired")) + { + xmlChar* pairState = xmlNodeListGetString(doc, node->xmlChildrenNode, 1); + *paired = !xmlStrcmp(pairState, (const xmlChar*)"1"); + err = 0; + + xmlFree(pairState); + goto cleanup; + } + node = node->xmlChildrenNode; + } + + NSLog(@"ERROR: Could not find \"paired\" element in XML"); + err = -2; + +cleanup: + xmlFreeNode(node); + return err; +} + +- (NSData*) httpRequest:(NSString*)args +{ + NSString* requestString = [NSString stringWithFormat:@"http://%@:47989/%@", [ConnectionHandler resolveHost:self.hostName], args]; + + NSLog(@"Making HTTP request: %@", requestString); + + NSURL* url = [[NSURL alloc] initWithString:requestString]; + + NSMutableURLRequest* request = [[NSMutableURLRequest alloc] initWithURL:url]; + [request setHTTPMethod:@"GET"]; + [request setTimeoutInterval:7]; + + NSURLResponse* response = nil; + NSError* error = nil; + NSData* responseData = [NSURLConnection sendSynchronousRequest:request returningResponse:&response error:&error]; + + if (error != nil) { + NSLog(@"An error occured making HTTP request: %@\n Caused by: %@", [error localizedDescription], [error localizedFailureReason]); + } + return responseData; +} + + +- (void*) getXMLBufferFromData:(NSData*)data +{ + if (data == nil) { + NSLog(@"ERROR: No data to get XML from!"); + return NULL; + } + char* buffer = malloc([data length] + 1); + if (buffer == NULL) { + NSLog(@"ERROR: Unable to allocate XML buffer."); + return NULL; + } + [data getBytes:buffer length:[data length]]; + buffer[[data length]] = 0; + + NSLog(@"Buffer: %s", buffer); + //#allthejank + void* newBuffer = (void*)[[[NSString stringWithUTF8String:buffer]stringByReplacingOccurrencesOfString:@"UTF-16" withString:@"UTF-8" options:NSCaseInsensitiveSearch range:NSMakeRange(0, [data length])] UTF8String]; + + NSLog(@"New Buffer: %s", newBuffer); + free(buffer); + return newBuffer; +} + +- (void) showAlert:(UIAlertView*) alert +{ + [alert performSelector:@selector(show) onThread:[NSThread mainThread] withObject:nil waitUntilDone:NO]; +} + +@end diff --git a/Limelight-iOS/Icons/ios7/Icon-40.png b/Limelight-iOS/Icons/ios7/Icon-40.png new file mode 100644 index 0000000..79b3ea4 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-40.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-40@2x.png b/Limelight-iOS/Icons/ios7/Icon-40@2x.png new file mode 100644 index 0000000..7667b75 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-40@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-60.png b/Limelight-iOS/Icons/ios7/Icon-60.png new file mode 100644 index 0000000..8c9505a Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-60.png differ diff --git 
a/Limelight-iOS/Icons/ios7/Icon-60@2x.png b/Limelight-iOS/Icons/ios7/Icon-60@2x.png new file mode 100644 index 0000000..1696de7 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-60@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-72.png b/Limelight-iOS/Icons/ios7/Icon-72.png new file mode 100644 index 0000000..223ab87 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-72.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-72@2x.png b/Limelight-iOS/Icons/ios7/Icon-72@2x.png new file mode 100644 index 0000000..807c2d3 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-72@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-76.png b/Limelight-iOS/Icons/ios7/Icon-76.png new file mode 100644 index 0000000..e4297a5 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-76.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-76@2x.png b/Limelight-iOS/Icons/ios7/Icon-76@2x.png new file mode 100644 index 0000000..ac5c5f0 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-76@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-Small-50.png b/Limelight-iOS/Icons/ios7/Icon-Small-50.png new file mode 100644 index 0000000..fd18417 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-Small-50.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-Small-50@2x.png b/Limelight-iOS/Icons/ios7/Icon-Small-50@2x.png new file mode 100644 index 0000000..e6a0c0e Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-Small-50@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-Small.png b/Limelight-iOS/Icons/ios7/Icon-Small.png new file mode 100644 index 0000000..63658e0 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-Small.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon-Small@2x.png b/Limelight-iOS/Icons/ios7/Icon-Small@2x.png new file mode 100644 index 0000000..716a16a Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon-Small@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon.png b/Limelight-iOS/Icons/ios7/Icon.png new file mode 100644 index 0000000..e8d27d6 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon.png differ diff --git a/Limelight-iOS/Icons/ios7/Icon@2x.png b/Limelight-iOS/Icons/ios7/Icon@2x.png new file mode 100644 index 0000000..a3f9425 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/Icon@2x.png differ diff --git a/Limelight-iOS/Icons/ios7/iTunesArtwork.png b/Limelight-iOS/Icons/ios7/iTunesArtwork.png new file mode 100644 index 0000000..0914e19 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/iTunesArtwork.png differ diff --git a/Limelight-iOS/Icons/ios7/iTunesArtwork@2x.png b/Limelight-iOS/Icons/ios7/iTunesArtwork@2x.png new file mode 100644 index 0000000..0a144b6 Binary files /dev/null and b/Limelight-iOS/Icons/ios7/iTunesArtwork@2x.png differ diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Contents.json b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..2a53bf6 --- /dev/null +++ b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,58 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "size" : "29x29", + "scale" : "2x" + }, + { + "idiom" : "iphone", + "size" : "40x40", + "scale" : "2x" + }, + { + "size" : "60x60", + "idiom" : "iphone", + "filename" : "Icon-60@2x.png", + "scale" : "2x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "1x" + }, + { + "idiom" : "ipad", + "size" : "29x29", + "scale" : "2x" + }, + { + "size" : "40x40", + "idiom" : "ipad", + "filename" : "Icon-40.png", + 
"scale" : "1x" + }, + { + "size" : "40x40", + "idiom" : "ipad", + "filename" : "Icon-40@2x.png", + "scale" : "2x" + }, + { + "size" : "76x76", + "idiom" : "ipad", + "filename" : "Icon-76.png", + "scale" : "1x" + }, + { + "size" : "76x76", + "idiom" : "ipad", + "filename" : "Icon-76@2x.png", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40.png b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40.png new file mode 100644 index 0000000..79b3ea4 Binary files /dev/null and b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40.png differ diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40@2x.png b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40@2x.png new file mode 100644 index 0000000..7667b75 Binary files /dev/null and b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-40@2x.png differ diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-60@2x.png b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-60@2x.png new file mode 100644 index 0000000..1696de7 Binary files /dev/null and b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-60@2x.png differ diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76.png b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76.png new file mode 100644 index 0000000..e4297a5 Binary files /dev/null and b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76.png differ diff --git a/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76@2x.png b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76@2x.png new file mode 100644 index 0000000..ac5c5f0 Binary files /dev/null and b/Limelight-iOS/Images.xcassets/AppIcon.appiconset/Icon-76@2x.png differ diff --git a/Limelight-iOS/Images.xcassets/LaunchImage.launchimage/Contents.json b/Limelight-iOS/Images.xcassets/LaunchImage.launchimage/Contents.json new file mode 100644 index 0000000..6f870a4 --- /dev/null +++ b/Limelight-iOS/Images.xcassets/LaunchImage.launchimage/Contents.json @@ -0,0 +1,51 @@ +{ + "images" : [ + { + "orientation" : "portrait", + "idiom" : "iphone", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + }, + { + "orientation" : "portrait", + "idiom" : "iphone", + "subtype" : "retina4", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + }, + { + "orientation" : "portrait", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "1x" + }, + { + "orientation" : "landscape", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "1x" + }, + { + "orientation" : "portrait", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + }, + { + "orientation" : "landscape", + "idiom" : "ipad", + "extent" : "full-screen", + "minimum-system-version" : "7.0", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Limelight-iOS/Limelight-iOS b/Limelight-iOS/Limelight-iOS new file mode 160000 index 0000000..8fdc5ad --- /dev/null +++ b/Limelight-iOS/Limelight-iOS @@ -0,0 +1 @@ +Subproject commit 8fdc5ad97b9c5428588fdbd7505d759e9bb8abe8 diff --git a/Limelight-iOS/Limelight-iOS-Info.plist b/Limelight-iOS/Limelight-iOS-Info.plist new file mode 100644 index 0000000..c798ed1 --- /dev/null +++ b/Limelight-iOS/Limelight-iOS-Info.plist @@ -0,0 +1,48 @@ + + 
+ + + CFBundleDevelopmentRegion + en + CFBundleDisplayName + ${PRODUCT_NAME} + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIdentifier + com.limelight.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + ${PRODUCT_NAME} + CFBundlePackageType + APPL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1.0 + LSRequiresIPhoneOS + + UIMainStoryboardFile + MainFrame + UIMainStoryboardFile~ipad + MainFrame + UIRequiredDeviceCapabilities + + armv7 + + UIStatusBarHidden~ipad + + UISupportedInterfaceOrientations + + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + + diff --git a/Limelight-iOS/Limelight-iOS-Prefix.pch b/Limelight-iOS/Limelight-iOS-Prefix.pch new file mode 100644 index 0000000..5e79f44 --- /dev/null +++ b/Limelight-iOS/Limelight-iOS-Prefix.pch @@ -0,0 +1,17 @@ +// +// Prefix header +// +// The contents of this file are implicitly included at the beginning of every source file. +// + +#import + +#ifndef __IPHONE_3_0 +#warning "This project uses features only available in iOS SDK 3.0 and later." +#endif + +#ifdef __OBJC__ + #import + #import + #import +#endif diff --git a/Limelight-iOS/Limelight_iOS.xcdatamodeld/.xccurrentversion b/Limelight-iOS/Limelight_iOS.xcdatamodeld/.xccurrentversion new file mode 100644 index 0000000..d7c4be2 --- /dev/null +++ b/Limelight-iOS/Limelight_iOS.xcdatamodeld/.xccurrentversion @@ -0,0 +1,8 @@ + + + + + _XCCurrentVersionName + Limelight_iOS.xcdatamodel + + diff --git a/Limelight-iOS/Limelight_iOS.xcdatamodeld/Limelight_iOS.xcdatamodel/contents b/Limelight-iOS/Limelight_iOS.xcdatamodeld/Limelight_iOS.xcdatamodel/contents new file mode 100644 index 0000000..193f33c --- /dev/null +++ b/Limelight-iOS/Limelight_iOS.xcdatamodeld/Limelight_iOS.xcdatamodel/contents @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/Limelight-iOS/MainFrameViewController.h b/Limelight-iOS/MainFrameViewController.h new file mode 100644 index 0000000..a13c144 --- /dev/null +++ b/Limelight-iOS/MainFrameViewController.h @@ -0,0 +1,21 @@ +// +// MainFrameViewController.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import + +@interface MainFrameViewController : UIViewController +- (IBAction)StreamButton:(UIButton *)sender; +- (IBAction)PairButton:(UIButton *)sender; +@property (strong, nonatomic) IBOutlet UITextField *HostField; +@property (strong, nonatomic) IBOutlet UIPickerView *StreamConfigs; +@property (strong, nonatomic) NSArray* streamConfigVals; + ++ (const char*)getHostAddr; +- (void) segueIntoStream; + +@end diff --git a/Limelight-iOS/MainFrameViewController.m b/Limelight-iOS/MainFrameViewController.m new file mode 100644 index 0000000..c625d44 --- /dev/null +++ b/Limelight-iOS/MainFrameViewController.m @@ -0,0 +1,78 @@ +// +// MainFrameViewController.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
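// A hedged aside on +getHostAddr, declared in MainFrameViewController.h above
// (not part of the original diff): it hands out [hostAddr UTF8String], and its
// only caller in this diff (StreamFrameViewController) immediately wraps the
// result back into an NSString. Returning the NSString directly would avoid
// the C-string round trip and its lifetime caveats:
//
// + (NSString*) getHostAddr
// {
//     return hostAddr;
// }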
+// + +#import "MainFrameViewController.h" +#import "VideoDepacketizer.h" +#import "ConnectionHandler.h" + +@interface MainFrameViewController () + +@end + +@implementation MainFrameViewController +static NSString* hostAddr; + ++ (const char*)getHostAddr +{ + return [hostAddr UTF8String]; +} + +- (void)PairButton:(UIButton *)sender +{ + NSLog(@"Pair Button Pressed!"); + hostAddr = self.HostField.text; + [ConnectionHandler pairWithHost:hostAddr]; +} + +- (void)StreamButton:(UIButton *)sender +{ + NSLog(@"Stream Button Pressed!"); + hostAddr = self.HostField.text; + [ConnectionHandler streamWithHost:hostAddr viewController:self]; +} + +- (void) segueIntoStream { + [self performSegueWithIdentifier:@"createStreamFrame" sender:self]; +} + +- (NSString *)pickerView:(UIPickerView *)pickerView titleForRow:(NSInteger)row forComponent:(NSInteger)component +{ + return [self.streamConfigVals objectAtIndex:row]; +} + +- (void)pickerView:(UIPickerView *)pickerView didSelectRow:(NSInteger)row inComponent:(NSInteger)component +{ + //TODO: figure out how to save this info!! +} + +// returns the number of 'columns' to display. +- (NSInteger)numberOfComponentsInPickerView:(UIPickerView *)pickerView +{ + return 1; +} + +// returns the # of rows in each component.. +- (NSInteger)pickerView:(UIPickerView *)pickerView numberOfRowsInComponent:(NSInteger)component +{ + return 4; +} + +- (void)viewDidLoad +{ + [super viewDidLoad]; + // Do any additional setup after loading the view. + self.streamConfigVals = [[NSArray alloc] initWithObjects:@"1280x720 (30Hz)",@"1280x720 (60Hz)",@"1920x1080 (30Hz)",@"1920x1080 (60Hz)",nil]; + self.HostField.text = @"neyer.case.edu"; +} + +- (void)didReceiveMemoryWarning +{ + [super didReceiveMemoryWarning]; + // Dispose of any resources that can be recreated. +} +@end diff --git a/Limelight-iOS/StreamFrameViewController.h b/Limelight-iOS/StreamFrameViewController.h new file mode 100644 index 0000000..982a790 --- /dev/null +++ b/Limelight-iOS/StreamFrameViewController.h @@ -0,0 +1,14 @@ +// +// StreamFrameViewController.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import +#import "StreamView.h" + +@interface StreamFrameViewController : UIViewController + +@end diff --git a/Limelight-iOS/StreamFrameViewController.m b/Limelight-iOS/StreamFrameViewController.m new file mode 100644 index 0000000..ae82d49 --- /dev/null +++ b/Limelight-iOS/StreamFrameViewController.m @@ -0,0 +1,61 @@ +// +// StreamFrameViewController.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
+// + +#import "StreamFrameViewController.h" +#import "MainFrameViewController.h" +#import "VideoDepacketizer.h" +#import "Connection.h" +#import "VideoRenderer.h" +#import "ConnectionHandler.h" + +#include +#include +#include + +@interface StreamFrameViewController () + +@end + +@implementation StreamFrameViewController + +- (void)viewDidLoad +{ + [super viewDidLoad]; + + [UIApplication sharedApplication].idleTimerDisabled = YES; + + StreamView* streamView = [[StreamView alloc] initWithFrame:self.view.frame]; + streamView.backgroundColor = [UIColor blackColor]; + + [self.view addSubview:streamView]; + [streamView setNeedsDisplay]; + + CGAffineTransform transform = CGAffineTransformMakeTranslation((streamView.frame.size.height/2) - (streamView.frame.size.width/2), (streamView.frame.size.width/2) - (streamView.frame.size.height/2)); + transform = CGAffineTransformRotate(transform, M_PI_2); + transform = CGAffineTransformScale(transform, -1, -1); + streamView.transform = transform; + + // Repositions and resizes the view. + CGRect contentRect = CGRectMake(0,0, self.view.frame.size.width, self.view.frame.size.height); + streamView.bounds = contentRect; + + Connection* conn = [[Connection alloc] initWithHost:inet_addr([[ConnectionHandler resolveHost:[NSString stringWithUTF8String:[MainFrameViewController getHostAddr]]] UTF8String]) width:1280 height:720]; + + NSOperationQueue* opQueue = [[NSOperationQueue alloc] init]; + [opQueue addOperation:conn]; + [opQueue addOperation:[[VideoRenderer alloc]initWithTarget:streamView]]; + +} + +- (void)didReceiveMemoryWarning +{ + [super didReceiveMemoryWarning]; + // Dispose of any resources that can be recreated. +} + +@end diff --git a/Limelight-iOS/StreamView.h b/Limelight-iOS/StreamView.h new file mode 100644 index 0000000..bc5b71b --- /dev/null +++ b/Limelight-iOS/StreamView.h @@ -0,0 +1,13 @@ +// +// StreamView.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import + +@interface StreamView : UIView + +@end diff --git a/Limelight-iOS/StreamView.m b/Limelight-iOS/StreamView.m new file mode 100644 index 0000000..27aaffa --- /dev/null +++ b/Limelight-iOS/StreamView.m @@ -0,0 +1,73 @@ +// +// StreamView.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "StreamView.h" +#import "VideoDecoder.h" +#import "VideoRenderer.h" + +@implementation StreamView { + size_t width; + size_t height; + size_t bitsPerComponent; + size_t bytesPerRow; + CGColorSpaceRef colorSpace; + CGContextRef bitmapContext; + CGImageRef image; + unsigned char* pixelData; +} + +- (id)initWithFrame:(CGRect)frame +{ + self = [super initWithFrame:frame]; + + // Initialization code + + width = 1280; + height = 720; + bitsPerComponent = 8; + bytesPerRow = (bitsPerComponent / 8) * width * 4; + pixelData = malloc(width * height * 4); + colorSpace = CGColorSpaceCreateDeviceRGB(); + + return self; +} + + + +// Only override drawRect: if you perform custom drawing. +// An empty implementation adversely affects performance during animation. 
+- (void)drawRect:(CGRect)rect +{ + if (![VideoRenderer isRendering]) { + return; + } + if (!nv_avc_get_rgb_frame((char*)pixelData, width*height*4)) + { + //NSLog(@"no new decoded frame!"); + //return; + } + + bitmapContext = CGBitmapContextCreate(pixelData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little); + image = CGBitmapContextCreateImage(bitmapContext); + + struct CGContext* context = UIGraphicsGetCurrentContext(); + + CGContextSetBlendMode(context, kCGBlendModeCopy); + CGContextSetInterpolationQuality(context, kCGInterpolationNone); + CGContextSetShouldAntialias(context, false); + CGContextRotateCTM(context, -M_PI_2); + CGContextScaleCTM(context, -(float)self.frame.size.width/self.frame.size.height, (float)self.frame.size.height/self.frame.size.width); + CGContextDrawImage(context, rect, image); + + CGImageRelease(image); + + [super drawRect:rect]; +} + + +@end diff --git a/Limelight-iOS/Video/VideoDepacketizer.h b/Limelight-iOS/Video/VideoDepacketizer.h new file mode 100644 index 0000000..954a24a --- /dev/null +++ b/Limelight-iOS/Video/VideoDepacketizer.h @@ -0,0 +1,21 @@ +// +// VideoDepacketizer.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import +#import "VideoDecoder.h" + +@interface VideoDepacketizer : NSOperation +@property uint8_t* byteBuffer; +@property unsigned int offset; +@property VideoDecoder* decoder; +@property NSString* file; +@property UIView* target; + +- (id) initWithFile:(NSString*) file renderTarget:(UIView*)renderTarget; + +@end diff --git a/Limelight-iOS/Video/VideoDepacketizer.m b/Limelight-iOS/Video/VideoDepacketizer.m new file mode 100644 index 0000000..8154e6f --- /dev/null +++ b/Limelight-iOS/Video/VideoDepacketizer.m @@ -0,0 +1,78 @@ +// +// VideoDepacketizer.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "VideoDepacketizer.h" + +@implementation VideoDepacketizer +static int BUFFER_LENGTH = 131072; + +- (id)initWithFile:(NSString *)file renderTarget:(UIView*)renderTarget +{ + self = [super init]; + self.file = file; + self.target = renderTarget; + return self; +} + +- (void)main +{ + NSInputStream* inStream = [[NSInputStream alloc] initWithFileAtPath:self.file]; + self.byteBuffer = malloc(BUFFER_LENGTH); + self.decoder = [[VideoDecoder alloc]init]; + + + [inStream open]; + while ([inStream streamStatus] != NSStreamStatusOpen) { + NSLog(@"stream status: %d", [inStream streamStatus]); + sleep(1); + } + while (true) + { + unsigned int len = 0; + + len = [inStream read:self.byteBuffer maxLength:BUFFER_LENGTH]; + if (len) + { + BOOL firstStart = false; + for (int i = 0; i < len - 4; i++) { + self.offset++; + if (self.byteBuffer[i] == 0 && self.byteBuffer[i+1] == 0 + && self.byteBuffer[i+2] == 0 && self.byteBuffer[i+3] == 1) + { + if (firstStart) + { + // decode the first i-1 bytes and render a frame + //[self.decoder decode:self.byteBuffer length:i]; + [self.target performSelectorOnMainThread:@selector(setNeedsDisplay) withObject:NULL waitUntilDone:FALSE]; + + // move offset back to beginning of start sequence + [inStream setProperty:[[NSNumber alloc] initWithInt:self.offset-4] forKey:NSStreamFileCurrentOffsetKey]; + self.offset -= 1; + + break; + } else + { + firstStart = true; + } + } + } + } + else + { + NSLog(@"No Buffer! 
restarting file!"); + // move offset back to beginning of start sequence + self.offset = 0; + [inStream close]; + inStream = [[NSInputStream alloc] initWithFileAtPath:self.file]; + [inStream open]; + } + } + +} + +@end diff --git a/Limelight-iOS/VideoDecoder.h b/Limelight-iOS/VideoDecoder.h new file mode 100644 index 0000000..900a24f --- /dev/null +++ b/Limelight-iOS/VideoDecoder.h @@ -0,0 +1,46 @@ +// +// VideoDecoder.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import + +// Disables the deblocking filter at the cost of image quality +#define DISABLE_LOOP_FILTER 0x1 +// Uses the low latency decode flag (disables multithreading) +#define LOW_LATENCY_DECODE 0x2 +// Threads process each slice, rather than each frame +#define SLICE_THREADING 0x4 +// Uses nonstandard speedup tricks +#define FAST_DECODE 0x8 +// Uses bilinear filtering instead of bicubic +#define BILINEAR_FILTERING 0x10 +// Uses a faster bilinear filtering with lower image quality +#define FAST_BILINEAR_FILTERING 0x20 +// Disables color conversion (output is NV21) +#define NO_COLOR_CONVERSION 0x40 +// Native color format: RGB0 +#define NATIVE_COLOR_RGB0 0x80 +// Native color format: 0RGB +#define NATIVE_COLOR_0RGB 0x100 +// Native color format: ARGB +#define NATIVE_COLOR_ARGB 0x200 +// Native color format: RGBA +#define NATIVE_COLOR_RGBA 0x400 + +@interface VideoDecoder : NSObject +int nv_avc_init(int width, int height, int perf_lvl, int thread_count); +void nv_avc_destroy(void); + +int nv_avc_get_raw_frame(char* buffer, int size); + +int nv_avc_get_rgb_frame(char* buffer, int size); +int nv_avc_get_rgb_frame_int(int* buffer, int size); +int nv_avc_redraw(void); + +int nv_avc_get_input_padding_size(void); +int nv_avc_decode(unsigned char* indata, int inlen); +@end diff --git a/Limelight-iOS/VideoDecoder.m b/Limelight-iOS/VideoDecoder.m new file mode 100644 index 0000000..716f6c6 --- /dev/null +++ b/Limelight-iOS/VideoDecoder.m @@ -0,0 +1,335 @@ +// +// VideoDecoder.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/18/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
+// + +#import "VideoDecoder.h" +#import "libs/ffmpeg/armv7/include/libavcodec/avcodec.h" +#import "swscale.h" +#include + +@implementation VideoDecoder +- (id) init +{ + self = [super init]; + return self; +} + +// General decoder and renderer state +AVPacket pkt; +AVCodec* decoder; +AVCodecContext* decoder_ctx; +AVFrame* yuv_frame; +AVFrame* dec_frame; +pthread_mutex_t mutex; + +// Color conversion and rendering +AVFrame* rgb_frame; +char* rgb_frame_buf; +struct SwsContext* scaler_ctx; +int render_pix_fmt; + +#define BYTES_PER_PIXEL 4 + +// This function must be called before +// any other decoding functions +int nv_avc_init(int width, int height, int perf_lvl, int thread_count) { + int err; + int filtering; + + pthread_mutex_init(&mutex, NULL); + + // Initialize the avcodec library and register codecs + av_log_set_level(AV_LOG_QUIET); + avcodec_register_all(); + + av_init_packet(&pkt); + + decoder = avcodec_find_decoder(AV_CODEC_ID_H264); + if (decoder == NULL) { + // __android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't find H264 decoder"); + return -1; + } + + decoder_ctx = avcodec_alloc_context3(decoder); + if (decoder_ctx == NULL) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't allocate context"); + return -1; + } + + // Show frames even before a reference frame + decoder_ctx->flags2 |= CODEC_FLAG2_SHOW_ALL; + + if (perf_lvl & DISABLE_LOOP_FILTER) { + // Skip the loop filter for performance reasons + decoder_ctx->skip_loop_filter = AVDISCARD_ALL; + } + + if (perf_lvl & LOW_LATENCY_DECODE) { + // Use low delay single threaded encoding + decoder_ctx->flags |= CODEC_FLAG_LOW_DELAY; + } + + if (perf_lvl & SLICE_THREADING) { + decoder_ctx->thread_type = FF_THREAD_SLICE; + } + else { + decoder_ctx->thread_type = FF_THREAD_FRAME; + } + + decoder_ctx->thread_count = thread_count; + + decoder_ctx->width = width; + decoder_ctx->height = height; + decoder_ctx->pix_fmt = PIX_FMT_YUV420P; + + render_pix_fmt = AV_PIX_FMT_BGR0; + + err = avcodec_open2(decoder_ctx, decoder, NULL); + if (err < 0) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't open codec"); + return err; + } + + dec_frame = av_frame_alloc(); + if (dec_frame == NULL) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't allocate frame"); + return -1; + } + + if (!(perf_lvl & NO_COLOR_CONVERSION)) { + rgb_frame = av_frame_alloc(); + if (rgb_frame == NULL) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't allocate frame"); + return -1; + } + + rgb_frame_buf = (char*)av_malloc(width * height * BYTES_PER_PIXEL); + if (rgb_frame_buf == NULL) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't allocate picture"); + return -1; + } + + err = avpicture_fill((AVPicture*)rgb_frame, + rgb_frame_buf, + render_pix_fmt, + decoder_ctx->width, + decoder_ctx->height); + if (err < 0) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't fill picture"); + return err; + } + + if (perf_lvl & FAST_BILINEAR_FILTERING) { + filtering = SWS_FAST_BILINEAR; + } + else if (perf_lvl & BILINEAR_FILTERING) { + filtering = SWS_BILINEAR; + } + else { + filtering = SWS_BICUBIC; + } + + scaler_ctx = sws_getContext(decoder_ctx->width, + decoder_ctx->height, + decoder_ctx->pix_fmt, + decoder_ctx->width, + decoder_ctx->height, + render_pix_fmt, + filtering, + NULL, NULL, NULL); + if (scaler_ctx == NULL) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Couldn't get scaler context"); + return -1; + } + } + + return 0; +} + 
+// This function must be called after +// decoding is finished +void nv_avc_destroy(void) { + if (decoder_ctx) { + avcodec_close(decoder_ctx); + av_free(decoder_ctx); + decoder_ctx = NULL; + } + if (scaler_ctx) { + sws_freeContext(scaler_ctx); + scaler_ctx = NULL; + } + if (dec_frame) { + av_frame_free(&dec_frame); + dec_frame = NULL; + } + if (yuv_frame) { + av_frame_free(&yuv_frame); + yuv_frame = NULL; + } + if (rgb_frame) { + av_frame_free(&rgb_frame); + rgb_frame = NULL; + } + if (rgb_frame_buf) { + av_free(rgb_frame_buf); + rgb_frame_buf = NULL; + } + + pthread_mutex_destroy(&mutex); +} + +static AVFrame* dequeue_new_frame(void) { + AVFrame *our_yuv_frame = NULL; + + pthread_mutex_lock(&mutex); + + // Check if there's a new frame + if (yuv_frame) { + // We now own the decoder's frame and are + // responsible for freeing it when we're done + our_yuv_frame = yuv_frame; + yuv_frame = NULL; + } + + pthread_mutex_unlock(&mutex); + + return our_yuv_frame; +} + +static int update_rgb_frame(void) { + AVFrame *our_yuv_frame; + int err; + + our_yuv_frame = dequeue_new_frame(); + if (our_yuv_frame == NULL) { + return 0; + } + + // Convert the YUV image to RGB + err = sws_scale(scaler_ctx, + our_yuv_frame->data, + our_yuv_frame->linesize, + 0, + decoder_ctx->height, + rgb_frame->data, + rgb_frame->linesize); + + av_frame_free(&our_yuv_frame); + + if (err != decoder_ctx->height) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Scaling failed"); + return 0; + } + + return 1; +} + +static int render_rgb_to_buffer(char* buffer, int size) { + int err; + + // Draw the frame to the buffer + err = avpicture_layout((AVPicture*)rgb_frame, + render_pix_fmt, + decoder_ctx->width, + decoder_ctx->height, + buffer, + size); + if (err < 0) { + // __android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Picture fill failed"); + return 0; + } + + return 1; +} + +int nv_avc_get_raw_frame(char* buffer, int size) { + AVFrame *our_yuv_frame; + int err; + + our_yuv_frame = dequeue_new_frame(); + if (our_yuv_frame == NULL) { + return 0; + } + + err = avpicture_layout((AVPicture*)our_yuv_frame, + decoder_ctx->pix_fmt, + decoder_ctx->width, + decoder_ctx->height, + buffer, + size); + + av_frame_free(&our_yuv_frame); + + return (err >= 0); +} + +int nv_avc_get_rgb_frame(char* buffer, int size) { + return (update_rgb_frame() && render_rgb_to_buffer(buffer, size)); +} + +int nv_avc_get_input_padding_size(void) { + return FF_INPUT_BUFFER_PADDING_SIZE; +} + +// packets must be decoded in order +// indata must be inlen + FF_INPUT_BUFFER_PADDING_SIZE in length +int nv_avc_decode(unsigned char* indata, int inlen) { + int err = 0; + int got_pic = 0; + + pkt.data = indata; + pkt.size = inlen; + + while (pkt.size > 0) { + got_pic = 0; + err = avcodec_decode_video2( + decoder_ctx, + dec_frame, + &got_pic, + &pkt); + if (err < 0) { + //__android_log_write(ANDROID_LOG_ERROR, "NVAVCDEC", + // "Decode failed"); + got_pic = 0; + break; + } + + pkt.size -= err; + pkt.data += err; + } + + // Only copy the picture at the end of decoding the packet + if (got_pic) { + pthread_mutex_lock(&mutex); + + // Only clone this frame if the last frame was taken. + // This saves on extra copies for frames that don't get + // rendered. + if (yuv_frame == NULL) { + // Clone a new frame + yuv_frame = av_frame_clone(dec_frame); + } + + pthread_mutex_unlock(&mutex); + } + + return err < 0 ? 
err : 0; +} + + + +@end diff --git a/Limelight-iOS/VideoRenderer.h b/Limelight-iOS/VideoRenderer.h new file mode 100644 index 0000000..813597b --- /dev/null +++ b/Limelight-iOS/VideoRenderer.h @@ -0,0 +1,18 @@ +// +// VideoRenderer.h +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/19/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import +#import + +@interface VideoRenderer : NSOperation +@property UIView* renderTarget; +- (id) initWithTarget:(UIView*)target; ++ (void) startRendering; ++ (void) stopRendering; ++ (BOOL) isRendering; +@end diff --git a/Limelight-iOS/VideoRenderer.m b/Limelight-iOS/VideoRenderer.m new file mode 100644 index 0000000..a5e685c --- /dev/null +++ b/Limelight-iOS/VideoRenderer.m @@ -0,0 +1,51 @@ +// +// VideoRenderer.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/19/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import "VideoRenderer.h" + +@implementation VideoRenderer +static bool render = false; + +- (id)initWithTarget:(UIView *)target +{ + self = [super init]; + + self.renderTarget = target; + + return self; +} + +- (void)main +{ + while (true) + { + if (render) + { + [self.renderTarget performSelectorOnMainThread:@selector(setNeedsDisplay) withObject:NULL waitUntilDone:TRUE]; + usleep(10000); + } else { + sleep(1); + } + } +} + ++ (void) startRendering +{ + render = true; +} + ++ (void) stopRendering +{ + render = false; +} + ++ (BOOL) isRendering +{ + return render; +} +@end diff --git a/Limelight-iOS/en.lproj/InfoPlist.strings b/Limelight-iOS/en.lproj/InfoPlist.strings new file mode 100644 index 0000000..477b28f --- /dev/null +++ b/Limelight-iOS/en.lproj/InfoPlist.strings @@ -0,0 +1,2 @@ +/* Localized versions of Info.plist keys */ + diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avcodec.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avcodec.h new file mode 100644 index 0000000..29570a5 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avcodec.h @@ -0,0 +1,5031 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + + +#include + +#include "samplefmt.h" +#include "attributes.h" +#include "avutil.h" +#include "buffer.h" +#include "cpu.h" +#include "channel_layout.h" +#include "dict.h" +#include "frame.h" +#include "log.h" +#include "pixfmt.h" +#include "rational.h" + +#include "version.h" + +/** + * @defgroup libavc Encoding/Decoding Library + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + * + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of a existing codec ID changes (that would break ABI), + * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec. + * This ensures that 2 forks can independently add AVCodecIDs without producing conflicts. + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. 
+ */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + AV_CODEC_ID_MPEG2VIDEO_XVMC, + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, + AV_CODEC_ID_IFF_BYTERUN1, + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + 
AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130_DEPRECATED, + AV_CODEC_ID_G2M_DEPRECATED, + AV_CODEC_ID_WEBP_DEPRECATED, + + AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'), + AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + AV_CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + AV_CODEC_ID_EXR = MKBETAG('0','E','X','R'), + AV_CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + AV_CODEC_ID_012V = MKBETAG('0','1','2','V'), + AV_CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + AV_CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + AV_CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'), + AV_CODEC_ID_V308 = MKBETAG('V','3','0','8'), + AV_CODEC_ID_V408 = MKBETAG('V','4','0','8'), + AV_CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + AV_CODEC_ID_SANM = MKBETAG('S','A','N','M'), + AV_CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + AV_CODEC_ID_AVRN = MKBETAG('A','V','R','n'), + AV_CODEC_ID_CPIA = MKBETAG('C','P','I','A'), + AV_CODEC_ID_XFACE = MKBETAG('X','F','A','C'), + AV_CODEC_ID_SGIRLE = MKBETAG('S','G','I','R'), + AV_CODEC_ID_MVC1 = MKBETAG('M','V','C','1'), + AV_CODEC_ID_MVC2 = MKBETAG('M','V','C','2'), + AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'), + AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'), + AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'), + AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'), +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, + AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'), + AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'), + AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16), + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + 
AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '), + AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '), + AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '), + AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '), + AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'), + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE + AV_CODEC_ID_VOXWARE, +#endif + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS_DEPRECATED, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK_DEPRECATED, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + AV_CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + AV_CODEC_ID_OPUS = MKBETAG('O','P','U','S'), + AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'), + AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'), + AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'), + + /* subtitle codecs */ + AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. 
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + AV_CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + AV_CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + AV_CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + AV_CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + AV_CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'), + AV_CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + AV_CODEC_ID_SUBRIP = MKBETAG('S','R','i','p'), + AV_CODEC_ID_WEBVTT = MKBETAG('W','V','T','T'), + AV_CODEC_ID_MPL2 = MKBETAG('M','P','L','2'), + AV_CODEC_ID_VPLAYER = MKBETAG('V','P','l','r'), + AV_CODEC_ID_PJS = MKBETAG('P','h','J','S'), + AV_CODEC_ID_ASS = MKBETAG('A','S','S',' '), ///< ASS as defined in Matroska + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + AV_CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + AV_CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + AV_CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'), + AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'), + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#if FF_API_CODEC_ID +#include "old_codec_ids.h" +#endif +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. + * @see avcodec_get_descriptor() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. + * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. 
+ * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bits at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 16 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define FF_MIN_BUFFER_SIZE 16384 + + +/** + * @ingroup lavc_encoding + * motion estimation type. + */ +enum Motion_Est_ID { + ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed + ME_FULL, + ME_LOG, + ME_PHODS, + ME_EPZS, ///< enhanced predictive zonal search + ME_X1, ///< reserved for experiments + ME_HEX, ///< hexagon based search + ME_UMH, ///< uneven multi-hexagon search + ME_TESA, ///< transformed exhaustive search algorithm + ME_ITER=50, ///< iterative search +}; + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). */ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVColorPrimaries{ + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_BT470M = 4, + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, + AVCOL_PRI_NB , ///< Not part of ABI +}; + +enum AVColorTransferCharacteristic{ + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_NB , ///< Not part of ABI +}; + +/** + * X X 3 4 X X are luma samples, + * 1 2 1-6 are possible chroma positions + * X X 5 6 X 0 is undefined/unknown position + */ +enum AVChromaLocation{ + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default + AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263 + AVCHROMA_LOC_TOPLEFT = 3, ///< DV + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB , ///< Not part of ABI +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +#define FF_MAX_B_FRAMES 16 + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). 
+ */ +#define CODEC_FLAG_UNALIGNED 0x0001 +#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. +#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. +#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. +/** + * The parent program guarantees that the input for B-frames containing + * streams is not written to for at least s->max_b_frames+1 frames, if + * this is not set the input will be copied. + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. +#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. +#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. +#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. +#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. +#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random + location instead of only at frame boundaries. */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization. +#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT. +#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay. +#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe. +#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT). +/* Fx : Flag for h263+ extra options */ +#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction +#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter +#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation +#define CODEC_FLAG_CLOSED_GOP 0x80000000 +#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks. +#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding. +#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata. +#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!! +#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS. + +#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries. +#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 0x0002 +#define CODEC_CAP_TRUNCATED 0x0008 +/* Codec can export data for HW decoding (XvMC). */ +#define CODEC_CAP_HWACCEL 0x0010 +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. 
The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY 0x0020 +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME 0x0040 +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). + */ +#define CODEC_CAP_HWACCEL_VDPAU 0x0080 +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carring such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES 0x0100 +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL 0x0200 +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF 0x0400 + +/** + * Codec is able to deal with negative linesizes + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 + +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS 0x1000 +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS 0x2000 +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE 0x4000 +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS 0x8000 +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000 +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define CODEC_CAP_LOSSLESS 0x80000000 + +//The following defines may change, don't expect compatibility if you use them. 
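The padding and capability rules documented above (FF_INPUT_BUFFER_PADDING_SIZE, CODEC_CAP_DELAY) map directly onto how a client of this vendored decoder has to feed it. Below is a minimal sketch against the avcodec 55-era interface shipped here; the helper names (decode_unit, drain_decoder) and the surrounding buffer handling are illustrative assumptions, not part of this header.

#include <stdint.h>
#include <string.h>
#include <libavcodec/avcodec.h>

/* Decode one compressed unit. The input copy is padded with
 * FF_INPUT_BUFFER_PADDING_SIZE zero bytes, as required above. */
static int decode_unit(AVCodecContext *ctx, AVFrame *frame,
                       const uint8_t *data, int len)
{
    uint8_t *buf = av_malloc(len + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return -1; /* out of memory */
    memcpy(buf, data, len);
    memset(buf + len, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = buf;
    pkt.size = len;

    int got_frame = 0;
    int err = avcodec_decode_video2(ctx, frame, &got_frame, &pkt);
    av_free(buf);
    return err < 0 ? err : got_frame; /* > 0 means a frame is ready in 'frame' */
}

/* At end of stream, a decoder advertising CODEC_CAP_DELAY still holds
 * frames; feed it empty packets until it stops returning output. */
static void drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
    if (!(ctx->codec->capabilities & CODEC_CAP_DELAY))
        return;

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL; /* NULL data / zero size signals end of stream */
    pkt.size = 0;

    int got_frame;
    do {
        if (avcodec_decode_video2(ctx, frame, &got_frame, &pkt) < 0)
            break;
    } while (got_frame);
}

Draining matters in practice because H.264 decoders typically buffer reordered frames; skipping it silently drops the last frames of a stream.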
+#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan{ + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +}AVPanScan; + +#define FF_QSCALE_TYPE_MPEG1 0 +#define FF_QSCALE_TYPE_MPEG2 1 +#define FF_QSCALE_TYPE_H264 2 +#define FF_QSCALE_TYPE_VP56 3 + +#if FF_API_GET_BUFFER +#define FF_BUFFER_TYPE_INTERNAL 1 +#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user) +#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared. +#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything. + +#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore). +#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer. +#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content. +#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update). +#endif + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + AV_PKT_DATA_PALETTE, + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. 
+ * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES=70, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. + */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, +}; + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf or destruct (deprecated) + * fields. If either is set, the packet data is dynamically allocated and is + * valid indefinitely until av_free_packet() is called (which in turn calls + * av_buffer_unref()/the destruct callback to free the data). If neither is set, + * the packet data is typically backed by some static buffer somewhere and is + * only valid for a limited time (e.g. until the next read call when demuxing). + * + * The side data is always allocated with av_malloc() and is freed in + * av_free_packet(). + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. 
+ * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + struct { + uint8_t *data; + int size; + enum AVPacketSideDataType type; + } *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. + * Equals next_pts - this_pts in presentation order. + */ + int duration; +#if FF_API_DESTRUCT_PACKET + attribute_deprecated + void (*destruct)(struct AVPacket *); + attribute_deprecated + void *priv; +#endif + int64_t pos; ///< byte position in stream, -1 if unknown + + /** + * Time difference in AVStream->time_base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current packet. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * sizeof(AVCodecContext) must not be used outside libav*. 
+ */ +typedef struct AVCodecContext { + /** + * information on struct for av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; + char codec_name[32]; + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. + * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + + /** + * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * - encoding: unused + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int stream_codec_tag; + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream. + */ + int bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags; + + /** + * CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * mjpeg: Huffman tables + * rv10: additional flags + * mpeg4: global headers (they can be in the bitstream or here) + * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. 
+ */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * - encoding: MUST be set by user. + * - decoding: Set by libavcodec. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this is the number of "priming" samples added to the + * beginning of the stream. The decoded output will be delayed by this + * many samples relative to the input to the encoder. Note that this + * field is purely informational and does not directly affect the pts + * output by the encoder, which should always be based on the actual + * presentation time, including any delay. + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required. + */ + int coded_width, coded_height; + +#define FF_ASPECT_EXTENDED 15 + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec if known + */ + enum AVPixelFormat pix_fmt; + + /** + * Motion estimation algorithm used for video coding. + * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex), + * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific] + * - encoding: MUST be set by user. + * - decoding: unused + */ + int me_method; + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. 
You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. + * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + + /** obsolete FIXME remove */ + int rc_strategy; +#define FF_RC_STRATEGY_XVID 1 + + int b_frame_strategy; + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + + /** + * 0-> h263 quant 1-> mpeg quant + * - encoding: Set by user. + * - decoding: unused + */ + int mpeg_quant; + + /** + * qscale factor between P and I-frames + * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. 
+ * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + /** + * prediction method (needed for huffyuv) + * - encoding: Set by user. + * - decoding: unused + */ + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. + * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + + /** + * prepass for motion estimation + * - encoding: Set by user. + * - decoding: unused + */ + int pre_me; + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + + /** + * DTG active format information (additional aspect ratio + * information only used in DVB MPEG-2 transport streams) + * 0 if not set. + * + * - encoding: unused + * - decoding: Set by decoder. + */ + int dtg_active_format; +#define FF_DTG_AFD_SAME 8 +#define FF_DTG_AFD_4_3 9 +#define FF_DTG_AFD_16_9 10 +#define FF_DTG_AFD_14_9 11 +#define FF_DTG_AFD_4_3_SP_14_9 13 +#define FF_DTG_AFD_16_9_SP_14_9 14 +#define FF_DTG_AFD_SP_4_3 15 + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_range; + + /** + * intra quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int intra_quant_bias; +#define FF_DEFAULT_QUANT_BIAS 999999 + + /** + * inter quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int inter_quant_bias; + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. 
+ */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + + /** + * XVideo Motion Acceleration + * - encoding: forbidden + * - decoding: set by decoder + */ + int xvmc_acceleration; + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *inter_matrix; + + /** + * scene change detection threshold + * 0 is default, larger means fewer detected scene changes. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_threshold; + + /** + * noise reduction strength + * - encoding: Set by user. + * - decoding: unused + */ + int noise_reduction; + + /** + * Motion estimation threshold below which no motion estimation is + * performed, but instead the user specified motion vectors are used. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_threshold; + + /** + * Macroblock threshold below which the user specified macroblock types will be used. + * - encoding: Set by user. + * - decoding: unused + */ + int mb_threshold; + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: unused + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + + /** + * Border processing masking, raises the quantizer for mbs on the borders + * of the picture. + * - encoding: Set by user. + * - decoding: unused + */ + float border_masking; + + /** + * minimum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmax; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_penalty_compensation; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int brd_scale; + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. + */ + int refs; + + /** + * chroma qp offset from luma + * - encoding: Set by user. + * - decoding: unused + */ + int chromaoffset; + + /** + * Multiplied by qscale for each frame and added to scene_change_score. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_factor; + + /** + * + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. 
+ * - decoding: unused + */ + int mv0_threshold; + + /** + * Adjust sensitivity of b_frame_strategy 1. + * - encoding: Set by user. + * - decoding: unused + */ + int b_sensitivity; + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. + * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + +#if FF_API_REQUEST_CHANNELS + /** + * Decoder should decode to this many channels if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + * @deprecated Deprecated in favor of request_channel_layout. + */ + attribute_deprecated int request_channels; +#endif + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. 
+ */ + enum AVSampleFormat request_sample_fmt; + +#if FF_API_GET_BUFFER + /** + * Called at the beginning of each frame to get a buffer for it. + * + * The function will set AVFrame.data[], AVFrame.linesize[]. + * AVFrame.extended_data[] must also be set, but it should be the same as + * AVFrame.data[] except for planar audio with more channels than can fit + * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as + * many data pointers as it can hold. + * + * if CODEC_CAP_DR1 is not set then get_buffer() must call + * avcodec_default_get_buffer() instead of providing buffers allocated by + * some other means. + * + * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't + * need it. avcodec_default_get_buffer() aligns the output buffer properly, + * but if get_buffer() is overridden then alignment considerations should + * be taken into account. + * + * @see avcodec_default_get_buffer() + * + * Video: + * + * If pic.reference is set then the frame will be read later by libavcodec. + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * it may be called from a different thread, but not from more than one at + * once. Does not need to be reentrant. + * + * @see release_buffer(), reget_buffer() + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * Decoders cannot use the buffer after returning from + * avcodec_decode_audio4(), so they will not call release_buffer(), as it + * is assumed to be released immediately upon return. In some rare cases, + * a decoder may need to call get_buffer() more than once in a single + * call to avcodec_decode_audio4(). In that case, when get_buffer() is + * called again after it has already been called once, the previously + * acquired buffer is assumed to be released at that time and may not be + * reused by the decoder. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated use get_buffer2() + */ + attribute_deprecated + int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called to release buffers which were allocated with get_buffer. + * A released buffer can be reused in get_buffer(). + * pic.data[*] must be set to NULL. + * May be called from a different thread if frame multithreading is used, + * but not by more than one thread at once, so does not need to be reentrant. + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated custom freeing callbacks should be set from get_buffer2() + */ + attribute_deprecated + void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called at the beginning of a frame to get cr buffer for it. + * Buffer type (size, hints) must be the same. 
libavcodec won't check it.
+ * libavcodec will pass previous buffer in pic, function should return
+ * same buffer or new buffer with old frame "painted" into it.
+ * If pic.data[0] == NULL, it must behave like get_buffer().
+ * If CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ attribute_deprecated
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer for each data plane or anything in between. This
+ * means you may set as many entries in buf[] as you feel necessary.
+ * Each buffer must be reference-counted using the AVBuffer API (see description
+ * of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+ * the frame's data and extended_data pointers must be contained in these. That
+ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an
+ * edge of the size returned by avcodec_get_edge_width() on all sides.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
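+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: a minimal
+ * sketch of a custom callback that only observes allocations and delegates
+ * the actual work to the default implementation; my_get_buffer2 is an
+ * illustrative name.)
+ * @code
+ * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     // custom bookkeeping (logging, statistics, ...) would go here
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ *
+ * avctx->get_buffer2 = my_get_buffer2;
+ * @endcode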
+ * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + + /** + * ratecontrol qmin qmax limiting method + * 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax. + * - encoding: Set by user. + * - decoding: unused + */ + float rc_qsquish; + + float rc_qmod_amp; + int rc_qmod_freq; + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + + /** + * rate control equation + * - encoding: Set by user + * - decoding: unused + */ + const char *rc_eq; + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_min_rate; + + float rc_buffer_aggressivity; + + /** + * initial complexity for pass1 ratecontrol + * - encoding: Set by user. + * - decoding: unused + */ + float rc_initial_cplx; + + /** + * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. 
+ * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 +#define FF_CODER_TYPE_DEFLATE 4 + /** + * coder type + * - encoding: Set by user. + * - decoding: unused + */ + int coder_type; + + /** + * context model + * - encoding: Set by user. + * - decoding: unused + */ + int context_model; + + /** + * minimum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmin; + + /** + * maximum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmax; + + /** + * frame skip threshold + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_threshold; + + /** + * frame skip factor + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_factor; + + /** + * frame skip exponent + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_exp; + + /** + * frame skip comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_cmp; + + /** + * trellis RD quantization + * - encoding: Set by user. + * - decoding: unused + */ + int trellis; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int min_prediction_order; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int max_prediction_order; + + /** + * GOP timecode frame start number + * - encoding: Set by user, in non drop frame format + * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset) + */ + int64_t timecode_frame_start; + + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. */ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); + + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ + + /* statistics, used for 2-pass encoding */ + int mv_bits; + int header_bits; + int i_tex_bits; + int p_tex_bits; + int i_count; + int p_count; + int skip_count; + int misc_bits; + + /** + * number of bits used for the previously encoded frame + * - encoding: Set by libavcodec. + * - decoding: unused + */ + int frame_bits; + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. + * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. + * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#define FF_BUG_OLD_MSMPEG4 2 +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. 
+#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 + + /** + * strictly follow the standard (MPEG4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#define FF_DEBUG_MV 32 +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#define FF_DEBUG_PTS 0x00000200 +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; +#define AV_EF_CRCCHECK (1<<0) ///< verify embedded CRCs +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliancies as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * @deprecated in favor of pkt_pts + * - encoding: unused + * - decoding: Set by user. 
+ */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#define FF_IDCT_SH4 9 +#define FF_IDCT_SIMPLEARM 10 +#define FF_IDCT_IPP 13 +#define FF_IDCT_XVIDMMX 14 +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#define FF_IDCT_SIMPLEVIS 18 +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#define FF_IDCT_SIMPLEALPHA 23 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + AVFrame *coded_frame; + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. 
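+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: a sketch of
+ * a typical threading setup done before avcodec_open2(); the values shown
+ * are illustrative only.)
+ * @code
+ * avctx->thread_count          = 2;
+ * avctx->thread_type           = FF_THREAD_SLICE;
+ * avctx->thread_safe_callbacks = 1; // our get_buffer2() tolerates any thread
+ * @endcode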
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 (< MAX_THREADS), and no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where, when any call to func
+ * returns < 0, no further calls to func may be done and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+ /**
+ * thread opaque
+ * Can be used by execute() to store some per-AVCodecContext data.
+ * - encoding: set by execute()
+ * - decoding: set by execute()
+ */
+ void *thread_opaque;
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
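+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: after
+ * avcodec_open2() a decoder reports the detected profile here; it can be
+ * compared against the FF_PROFILE_* constants below, e.g.)
+ * @code
+ * if (avctx->profile == FF_PROFILE_H264_HIGH)
+ *     av_log(avctx, AV_LOG_INFO, "High profile H.264 stream\n");
+ * @endcode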
+ */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 +#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. 
+ * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + + /** + * Simulates errors in the bitstream to test error concealment. + * - encoding: Set by user. + * - decoding: unused + */ + int error_rate; + + /** + * Current packet as passed into the decoder, to avoid having + * to pass the packet into every function. Currently only valid + * inside lavc and get/release_buffer callbacks. + * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts + * - encoding: unused + */ + AVPacket *pkt; + + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + */ + uint64_t vbv_delay; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_pkt_timebase(avctx) + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_codec_descriptor(avctx) + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. + * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). 
+ * Code outside libavcodec should access this field using AVOptions + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; +} AVCodecContext; + +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +int av_codec_get_lowres(const AVCodecContext *avctx); +void av_codec_set_lowres(AVCodecContext *avctx, int val); + +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. + * see CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 +#if FF_API_LOWRES + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres() +#endif + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. 
+ * If not defined, the next thread will start automatically; otherwise, the codec + * must call ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from avcodec_register(). + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. + * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); +} AVCodec; + +int av_codec_get_max_lowres(const AVCodec *codec); + +/** + * AVHWAccel. + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see FF_HWACCEL_CODEC_CAP_* + */ + int capabilities; + + struct AVHWAccel *next; + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. + * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. 
+ * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of HW accelerator private data. + * + * Private data is allocated with av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int priv_data_size; +} AVHWAccel; + +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + */ +typedef struct AVPicture { + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + + /** + * data+linesize for the bitmap of this subtitle. + * can be set for text/ass as well once they where rendered + */ + AVPicture pict; + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(const AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. 
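+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: the common
+ * case is a single global registration at program start-up.)
+ * @code
+ * avcodec_register_all();   // once, before any other libavcodec call
+ * @endcode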
+ * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + + +#if FF_API_ALLOC_CONTEXT +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by simply calling av_free(). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + * + * @deprecated use avcodec_alloc_context3() + */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context(void); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context2(enum AVMediaType); + +/** + * Set the fields of the given AVCodecContext to default values. + * + * @param s The AVCodecContext of which the fields should be set to default values. + * @deprecated use avcodec_get_context_defaults3 + */ +attribute_deprecated +void avcodec_get_context_defaults(AVCodecContext *s); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); +#endif + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by calling avcodec_close() on it followed + * by av_free(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Set the fields of the given AVCodecContext to default values corresponding + * to the given codec (defaults may be codec-dependent). + * + * Do not call this function if a non-NULL codec has been passed + * to avcodec_alloc_context3() that allocated this AVCodecContext. + * If codec is non-NULL, it is illegal to call avcodec_open2() with a + * different codec on this AVCodecContext. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. 
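+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: a sketch of
+ * cloning an existing, configured context src and opening the copy; error
+ * handling is abbreviated.)
+ * @code
+ * AVCodecContext *dst = avcodec_alloc_context3(NULL);
+ * if (!dst || avcodec_copy_context(dst, src) < 0)
+ *     return -1;
+ * if (avcodec_open2(dst, avcodec_find_decoder(dst->codec_id), NULL) < 0)
+ *     return -1;
+ * @endcode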
+ * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + */ +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using avcodec_free_frame(). + * + * @return An AVFrame filled with default values or NULL on failure. + * @see avcodec_get_frame_defaults + */ +AVFrame *avcodec_alloc_frame(void); + +/** + * Set the fields of the given AVFrame to default values. + * + * @param frame The AVFrame of which the fields should be set to default values. + */ +void avcodec_get_frame_defaults(AVFrame *frame); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. + * + * @param frame frame to be freed. The pointer will be set to NULL. + * + * @warning this function does NOT free the data buffers themselves + * (it does not know how, since they might have been allocated with + * a custom get_buffer()). + */ +void avcodec_free_frame(AVFrame **frame); + +#if FF_API_AVCODEC_OPEN +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated. + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open(context, codec) < 0) + * exit(1); + * @endcode + * + * @param avctx The context which will be set up to use the given codec. + * @param codec The codec to use within the context. + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close + * + * @deprecated use avcodec_open2 + */ +attribute_deprecated +int avcodec_open(AVCodecContext *avctx, AVCodec *codec); +#endif + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * avcodec_get_context_defaults3() for this context, then this + * parameter MUST be either NULL or equal to the previously passed + * codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. 
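+ * (Editor's note, not part of the upstream FFmpeg documentation: leftover
+ * entries can be detected after the call; this sketch assumes
+ * av_dict_count()/av_dict_free() from libavutil/dict.h, and
+ * "refcounted_frames" is only an example option.)
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "refcounted_frames", "1", 0);
+ * if (avcodec_open2(avctx, codec, &opts) < 0)
+ *     return -1;
+ * if (av_dict_count(opts))
+ *     av_log(avctx, AV_LOG_WARNING, "some codec options were not recognized\n");
+ * av_dict_free(&opts);
+ * @endcode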
+ * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() / + * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will + * do nothing. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +#if FF_API_DESTRUCT_PACKET +/** + * Default packet destructor. + * @deprecated use the AVBuffer API instead + */ +attribute_deprecated +void av_destruct_packet(AVPacket *pkt); +#endif + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. + * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + */ +int av_dup_packet(AVPacket *pkt); + +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet(AVPacket *dst, AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet_side_data(AVPacket *dst, AVPacket *src); + +/** + * Free a packet. + * + * @param pkt packet to free + */ +void av_free_packet(AVPacket *pkt); + +/** + * Allocate new information of a packet. 
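+ *
+ * (Editor's note, not part of the upstream FFmpeg documentation: a sketch of
+ * attaching new extradata to a packet; extradata and extradata_size are
+ * placeholders for caller-provided data.)
+ * @code
+ * uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
+ *                                       extradata_size);
+ * if (sd)
+ *     memcpy(sd, extradata, extradata_size);
+ * @endcode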
+ * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +int av_packet_merge_side_data(AVPacket *pkt); + +int av_packet_split_side_data(AVPacket *pkt); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + * + */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder_by_name(const char *name); + +#if FF_API_GET_BUFFER +attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); +#endif + +/** + * The default callback for AVCodecContext.get_buffer2(). 
It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * CODEC_CAP_DR1 set. + */ +int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +/** + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. + * + * @return Required padding in pixels. + */ +unsigned avcodec_get_edge_width(void); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +#if FF_API_OLD_DECODE_AUDIO +/** + * Wrapper function which calls avcodec_decode_audio4. + * + * @deprecated Use avcodec_decode_audio4 instead. + * + * Decode the audio frame of size avpkt->size from avpkt->data into samples. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio3 has to be called again with an AVPacket that contains + * the remaining data in order to decode the second frame etc. + * If no frame + * could be outputted, frame_size_ptr is zero. Otherwise, it is the + * decompressed frame size in bytes. + * + * @warning You must set frame_size_ptr to the allocated size of the + * output buffer before calling avcodec_decode_audio3(). + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @warning You must not provide a custom get_buffer() when using + * avcodec_decode_audio3(). 
Doing so will override it with + * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, + * which does allow the application to provide a custom get_buffer(). + * + * @note You might have to align the input buffer avpkt->data and output buffer + * samples. The alignment requirements depend on the CPU: On some CPUs it isn't + * necessary at all, on others it won't work at all if not aligned and on others + * it will work but it will have an impact on performance. + * + * In practice, avpkt->data should have 4 byte alignment at minimum and + * samples should be 16 byte aligned unless the CPU doesn't need it + * (AltiVec and SSE do). + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] samples the output buffer, sample type in avctx->sample_fmt + * If the sample format is planar, each channel plane will + * be the same size, with no padding between channels. + * @param[in,out] frame_size_ptr the output buffer size in bytes + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields. + * All decoders are designed to use the least fields possible though. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame data was decompressed (used) from the input AVPacket. + */ +attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, + int *frame_size_ptr, + AVPacket *avpkt); +#endif + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. 
The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + */ +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + */ +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. 
+ * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @param avctx the codec context + * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be + freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. 
+ */ + int key_frame; + + /** + * Time difference in stream time base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current frame. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. + */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. 
+ */ + int output_picture_number; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +#if FF_API_OLD_ENCODE_AUDIO +/** + * Encode an audio frame from samples into buf. + * + * @deprecated Use avcodec_encode_audio2 instead. + * + * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. + * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user + * will know how much space is needed because it depends on the value passed + * in buf_size as described below. In that case a lower value can be used. + * + * @param avctx the codec context + * @param[out] buf the output buffer + * @param[in] buf_size the output buffer size + * @param[in] samples the input buffer containing the samples + * The number of samples read from this buffer is frame_size*channels, + * both of which are defined in avctx. + * For codecs which have avctx->frame_size equal to 0 (e.g. 
PCM) the number of + * samples read from samples is equal to: + * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) + * This also implies that av_get_bits_per_sample() must not return 0 for these + * codecs. + * @return On error a negative value is returned, on success zero or the number + * of bytes used to encode the data read from the input buffer. + */ +int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, + uint8_t *buf, int buf_size, + const short *samples); +#endif + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +#if FF_API_OLD_ENCODE_VIDEO +/** + * @deprecated use avcodec_encode_video2() instead. + * + * Encode a video frame from pict into buf. + * The input picture should be + * stored using a specific format, namely avctx.pix_fmt. + * + * @param avctx the codec context + * @param[out] buf the output buffer for the bitstream of encoded frame + * @param[in] buf_size the size of the output buffer in bytes + * @param[in] pict the input picture to encode + * @return On error a negative value is returned, on success zero or the number + * of bytes used from the output buffer. + */ +attribute_deprecated +int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVFrame *pict); +#endif + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. 
+ * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVCODEC_RESAMPLE +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * @deprecated use libswresample instead + * + * @{ + */ +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. + * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +attribute_deprecated +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +attribute_deprecated +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +attribute_deprecated +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. 
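+ *
+ * Deprecated API; a minimal sketch of typical per-channel use (dst, src,
+ * in_samples and max_out are assumed short buffers/counts, and the chosen
+ * filter parameters are only illustrative). libswresample is the replacement.
+ * @code
+ * struct AVResampleContext *rc = av_resample_init(48000, 44100, 16, 10, 0, 0.8);
+ * int consumed = 0;
+ * int produced = av_resample(rc, dst, src, &consumed, in_samples, max_out, 1);
+ * av_resample_close(rc);
+ * @endcode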
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +attribute_deprecated +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +attribute_deprecated +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +attribute_deprecated +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +attribute_deprecated +void av_resample_close(struct AVResampleContext *c); + +/** + * @} + */ +#endif + +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * Allocate memory for the pixels of a picture and setup the AVPicture + * fields for it. + * + * Call avpicture_free() to free it. + * + * @param picture the picture structure to be filled in + * @param pix_fmt the pixel format of the picture + * @param width the width of the picture + * @param height the height of the picture + * @return zero if successful, a negative error code otherwise + * + * @see av_image_alloc(), avpicture_fill() + */ +int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Free a picture previously allocated by avpicture_alloc(). + * The data buffer used by the AVPicture is freed, but the AVPicture structure + * itself is not. + * + * @param picture the AVPicture to be freed + */ +void avpicture_free(AVPicture *picture); + +/** + * Setup the picture fields based on the specified image parameters + * and the provided image data buffer. + * + * The picture fields are filled in by using the image data buffer + * pointed to by ptr. + * + * If ptr is NULL, the function will fill only the picture linesize + * array and return the required size for the image buffer. + * + * To allocate an image buffer and fill the picture data in one call, + * use avpicture_alloc(). 
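+ *
+ * A minimal sketch (pixel format and dimensions are illustrative, error
+ * checks omitted): query the required size with a NULL ptr, allocate the
+ * buffer, then fill the picture fields.
+ * @code
+ * AVPicture pic;
+ * int size = avpicture_fill(&pic, NULL, AV_PIX_FMT_YUV420P, 1280, 720);
+ * uint8_t *buf = av_malloc(size);
+ * avpicture_fill(&pic, buf, AV_PIX_FMT_YUV420P, 1280, 720);
+ * @endcode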
+ * + * @param picture the picture to be filled in + * @param ptr buffer where the image data is stored, or NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return the size in bytes required for src, a negative error code + * in case of failure + * + * @see av_image_fill_arrays() + */ +int avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy pixel data from an AVPicture into a buffer. + * + * avpicture_get_size() can be used to compute the required size for + * the buffer to fill. + * + * @param src source picture with filled data + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @param dest destination buffer + * @param dest_size destination buffer size in bytes + * @return the number of bytes written to dest, or a negative value + * (error code) on error, for example if the destination buffer is not + * big enough + * + * @see av_image_copy_to_buffer() + */ +int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * Calculate the size in bytes that a picture of the given width and height + * would occupy if stored in the given picture format. + * + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @return the computed picture buffer size or a negative error code + * in case of error + * + * @see av_image_get_buffer_size(). + */ +int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +#if FF_API_DEINTERLACE +/** + * deinterlace - if not supported return -1 + * + * @deprecated - use yadif (in libavfilter) instead + */ +attribute_deprecated +int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); +#endif +/** + * Copy image src to dst. Wraps av_image_copy(). + */ +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Crop image top and left side. + */ +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * Pad image. + */ +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample + * for one that returns a failure code and continues in case of invalid + * pix_fmts. 
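+ *
+ * For example (illustrative values for a 4:2:0 format):
+ * @code
+ * int h_shift, v_shift;
+ * avcodec_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
+ * // h_shift == 1, v_shift == 1: chroma planes are halved in both dimensions
+ * @endcode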
+ * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @see av_pix_fmt_get_chroma_sub_sample + */ + +void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. + */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * avcodec_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format and a selection of two destination pixel formats. When converting from + * one pixel format to another, information loss may occur. For example, when converting + * from RGB24 to GRAY, the color information will be lost. 
Similarly, other losses occur when + * converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() selects which of + * the given pixel formats should be used to suffer the least amount of loss. + * + * If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be + * returned. + * + * @code + * src_pix_fmt = AV_PIX_FMT_YUV420P; + * dst_pix_fmt1= AV_PIX_FMT_RGB24; + * dst_pix_fmt2= AV_PIX_FMT_GRAY8; + * dst_pix_fmt3= AV_PIX_FMT_RGB8; + * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored. + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss); + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss); + * @endcode + * + * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from + * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from + * @param[in] src_pix_fmt Source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e. + * NULL or value of zero means we care about all losses. Out: the loss + * that occurs when converting from src to selected dst pixel format. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); +#else +enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); +#endif + + +enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); + +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf_size size in bytes of buf + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + */ +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. 
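+ *
+ * A minimal sketch (buf and buf_size are assumed to hold 1024 interleaved
+ * stereo S16 samples): wrap an existing buffer in an AVFrame without copying.
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * frame->nb_samples = 1024;
+ * int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
+ *                                    buf, buf_size, 0);
+ * @endcode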
+ * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + + +typedef struct AVBitStreamFilterContext { + void *priv_data; + struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; +} AVBitStreamFilterContext; + + +typedef struct AVBitStreamFilter { + const char *name; + int priv_data_size; + int (*filter)(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + void (*close)(AVBitStreamFilterContext *bsfc); + struct AVBitStreamFilter *next; +} AVBitStreamFilter; + +/** + * Register a bitstream filter. + * + * The filter will be accessible to the application code through + * av_bitstream_filter_next() or can be directly initialized with + * av_bitstream_filter_init(). + * + * @see avcodec_register_all() + */ +void av_register_bitstream_filter(AVBitStreamFilter *bsf); + +/** + * Create and initialize a bitstream filter context given a bitstream + * filter name. + * + * The returned context must be freed with av_bitstream_filter_close(). 
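+ *
+ * A usage sketch (avctx and pkt are assumed to exist; the filter name is
+ * illustrative): convert H.264 from MP4/AVCC framing to Annex B.
+ * @code
+ * AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ * uint8_t *out = NULL; int out_size = 0;
+ * int ret = av_bitstream_filter_filter(bsfc, avctx, NULL, &out, &out_size,
+ *                                      pkt.data, pkt.size,
+ *                                      pkt.flags & AV_PKT_FLAG_KEY);
+ * // consume out/out_size; when ret > 0 the buffer was newly allocated
+ * // and must be released with av_free(out)
+ * av_bitstream_filter_close(bsfc);
+ * @endcode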
+ * + * @param name the name of the bitstream filter + * @return a bitstream filter context if a matching filter was found + * and successfully initialized, NULL otherwise + */ +AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); + +/** + * Filter bitstream. + * + * This function filters the buffer buf with size buf_size, and places the + * filtered buffer in the buffer pointed to by poutbuf. + * + * The output buffer must be freed by the caller. + * + * @param bsfc bitstream filter context created by av_bitstream_filter_init() + * @param avctx AVCodecContext accessed by the filter, may be NULL. + * If specified, this must point to the encoder context of the + * output stream the packet is sent to. + * @param args arguments which specify the filter configuration, may be NULL + * @param poutbuf pointer which is updated to point to the filtered buffer + * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes + * @param buf buffer containing the data to filter + * @param buf_size size in bytes of buf + * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data + * @return >= 0 in case of success, or a negative error code in case of failure + * + * If the return value is positive, an output buffer is allocated and + * is availble in *poutbuf, and is distinct from the input buffer. + * + * If the return value is 0, the output buffer is not allocated and + * should be considered identical to the input buffer, or in case + * *poutbuf was set it points to the input buffer (not necessarily to + * its starting address). + */ +int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + +/** + * Release bitstream filter context. + * + * @param bsf the bitstream filter context created with + * av_bitstream_filter_init(), can be NULL + */ +void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); + +/** + * If f is NULL, return the first registered bitstream filter, + * if f is non-NULL, return the next registered bitstream filter + * after f, or NULL if f is the last one. + * + * This function can be used to iterate over all registered bitstream + * filters. + */ +AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); + +/* memory */ + +/** + * Reallocate the given block if it is not large enough, otherwise do nothing. + * + * @see av_realloc + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_malloc but the buffer has additional + * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. 
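+ *
+ * Sketch of typical use (pkt_data and pkt_size are assumed inputs): grow a
+ * reusable buffer to hold an incoming packet plus the required zero padding.
+ * @code
+ * static uint8_t *buf = NULL;
+ * static unsigned buf_size = 0;
+ * av_fast_padded_malloc(&buf, &buf_size, pkt_size);
+ * if (!buf)
+ *     return AVERROR(ENOMEM);
+ * memcpy(buf, pkt_data, pkt_size);
+ * @endcode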
+ */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_MISSING_SAMPLE +/** + * Log a generic warning message about a missing feature. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] feature string containing the name of the missing feature + * @param[in] want_sample indicates if samples are wanted which exhibit this feature. + * If want_sample is non-zero, additional verbage will be added to the log + * message which tells the user how to report samples to the development + * mailing list. + * @deprecated Use avpriv_report_missing_feature() instead. + */ +attribute_deprecated +void av_log_missing_feature(void *avc, const char *feature, int want_sample); + +/** + * Log a generic warning message asking for a sample. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] msg string containing an optional message, or NULL if no message + * @deprecated Use avpriv_request_sample() instead. + */ +attribute_deprecated +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); +#endif /* FF_API_MISSING_SAMPLE */ + +/** + * Register the hardware accelerator hwaccel. + */ +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. + */ +AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel); + + +/** + * Lock operation used by lockmgr + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. mutex points to a (void *) where the + * lockmgr should store/get a pointer to a user allocated mutex. It's + * NULL upon AV_LOCK_CREATE and != NULL for all other ops. + * + * @param cb User defined callback. Note: FFmpeg may invoke calls to this + * callback during the call to av_lockmgr_register(). + * Thus, the application must be prepared to handle that. + * If cb is set to NULL the lockmgr will be unregistered. + * Also note that during unregistration the previously registered + * lockmgr callback may also be invoked. + */ +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); + +/** + * Get the type of the given codec. + */ +enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. 
+ * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avfft.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avfft.h new file mode 100644 index 0000000..2d20a45 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/avfft.h @@ -0,0 +1,116 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling ff_fft_calc(). + */ +void av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
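+ *
+ * Minimal sketch (size is illustrative; z is an assumed FFTComplex[1024]
+ * array transformed in place): a forward 1024-point complex FFT.
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);   // 2^10 = 1024 points, forward
+ * av_fft_permute(fft, z);
+ * av_fft_calc(fft, z);
+ * av_fft_end(fft);
+ * @endcode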
+ */ +void av_fft_calc(FFTContext *s, FFTComplex *z); + +void av_fft_end(FFTContext *s); + +FFTContext *av_mdct_init(int nbits, int inverse, double scale); +void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. + * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); +void av_rdft_calc(RDFTContext *s, FFTSample *data); +void av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *av_dct_init(int nbits, enum DCTTransformType type); +void av_dct_calc(DCTContext *s, FFTSample *data); +void av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/dxva2.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/dxva2.h new file mode 100644 index 0000000..ac39e06 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/dxva2.h @@ -0,0 +1,95 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA_H +#define AVCODEC_DXVA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. + */ + +#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0600 +#undef _WIN32_WINNT +#endif + +#if !defined(_WIN32_WINNT) +#define _WIN32_WINNT 0x0600 +#endif + +#include +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
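+ *
+ * Sketch (decoder, config and surfaces are application-created assumptions):
+ * the structure is filled by the application and handed to libavcodec
+ * before decoding starts.
+ * @code
+ * struct dxva_context *dxva = av_mallocz(sizeof(*dxva));
+ * dxva->decoder       = decoder;        // IDirectXVideoDecoder created by the app
+ * dxva->cfg           = &config;
+ * dxva->surface       = surfaces;
+ * dxva->surface_count = num_surfaces;
+ * avctx->hwaccel_context = dxva;
+ * @endcode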
+ */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/old_codec_ids.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/old_codec_ids.h new file mode 100644 index 0000000..f3affee --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/old_codec_ids.h @@ -0,0 +1,397 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_OLD_CODEC_IDS_H +#define AVCODEC_OLD_CODEC_IDS_H + +#include "common.h" + +/* + * This header exists to prevent new codec IDs from being accidentally added to + * the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVCodecID enum instead. 
+ */ + + CODEC_ID_NONE = AV_CODEC_ID_NONE, + + /* video codecs */ + CODEC_ID_MPEG1VIDEO, + CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + CODEC_ID_MPEG2VIDEO_XVMC, + CODEC_ID_H261, + CODEC_ID_H263, + CODEC_ID_RV10, + CODEC_ID_RV20, + CODEC_ID_MJPEG, + CODEC_ID_MJPEGB, + CODEC_ID_LJPEG, + CODEC_ID_SP5X, + CODEC_ID_JPEGLS, + CODEC_ID_MPEG4, + CODEC_ID_RAWVIDEO, + CODEC_ID_MSMPEG4V1, + CODEC_ID_MSMPEG4V2, + CODEC_ID_MSMPEG4V3, + CODEC_ID_WMV1, + CODEC_ID_WMV2, + CODEC_ID_H263P, + CODEC_ID_H263I, + CODEC_ID_FLV1, + CODEC_ID_SVQ1, + CODEC_ID_SVQ3, + CODEC_ID_DVVIDEO, + CODEC_ID_HUFFYUV, + CODEC_ID_CYUV, + CODEC_ID_H264, + CODEC_ID_INDEO3, + CODEC_ID_VP3, + CODEC_ID_THEORA, + CODEC_ID_ASV1, + CODEC_ID_ASV2, + CODEC_ID_FFV1, + CODEC_ID_4XM, + CODEC_ID_VCR1, + CODEC_ID_CLJR, + CODEC_ID_MDEC, + CODEC_ID_ROQ, + CODEC_ID_INTERPLAY_VIDEO, + CODEC_ID_XAN_WC3, + CODEC_ID_XAN_WC4, + CODEC_ID_RPZA, + CODEC_ID_CINEPAK, + CODEC_ID_WS_VQA, + CODEC_ID_MSRLE, + CODEC_ID_MSVIDEO1, + CODEC_ID_IDCIN, + CODEC_ID_8BPS, + CODEC_ID_SMC, + CODEC_ID_FLIC, + CODEC_ID_TRUEMOTION1, + CODEC_ID_VMDVIDEO, + CODEC_ID_MSZH, + CODEC_ID_ZLIB, + CODEC_ID_QTRLE, + CODEC_ID_TSCC, + CODEC_ID_ULTI, + CODEC_ID_QDRAW, + CODEC_ID_VIXL, + CODEC_ID_QPEG, + CODEC_ID_PNG, + CODEC_ID_PPM, + CODEC_ID_PBM, + CODEC_ID_PGM, + CODEC_ID_PGMYUV, + CODEC_ID_PAM, + CODEC_ID_FFVHUFF, + CODEC_ID_RV30, + CODEC_ID_RV40, + CODEC_ID_VC1, + CODEC_ID_WMV3, + CODEC_ID_LOCO, + CODEC_ID_WNV1, + CODEC_ID_AASC, + CODEC_ID_INDEO2, + CODEC_ID_FRAPS, + CODEC_ID_TRUEMOTION2, + CODEC_ID_BMP, + CODEC_ID_CSCD, + CODEC_ID_MMVIDEO, + CODEC_ID_ZMBV, + CODEC_ID_AVS, + CODEC_ID_SMACKVIDEO, + CODEC_ID_NUV, + CODEC_ID_KMVC, + CODEC_ID_FLASHSV, + CODEC_ID_CAVS, + CODEC_ID_JPEG2000, + CODEC_ID_VMNC, + CODEC_ID_VP5, + CODEC_ID_VP6, + CODEC_ID_VP6F, + CODEC_ID_TARGA, + CODEC_ID_DSICINVIDEO, + CODEC_ID_TIERTEXSEQVIDEO, + CODEC_ID_TIFF, + CODEC_ID_GIF, + CODEC_ID_DXA, + CODEC_ID_DNXHD, + CODEC_ID_THP, + CODEC_ID_SGI, + CODEC_ID_C93, + CODEC_ID_BETHSOFTVID, + CODEC_ID_PTX, + CODEC_ID_TXD, + CODEC_ID_VP6A, + CODEC_ID_AMV, + CODEC_ID_VB, + CODEC_ID_PCX, + CODEC_ID_SUNRAST, + CODEC_ID_INDEO4, + CODEC_ID_INDEO5, + CODEC_ID_MIMIC, + CODEC_ID_RL2, + CODEC_ID_ESCAPE124, + CODEC_ID_DIRAC, + CODEC_ID_BFI, + CODEC_ID_CMV, + CODEC_ID_MOTIONPIXELS, + CODEC_ID_TGV, + CODEC_ID_TGQ, + CODEC_ID_TQI, + CODEC_ID_AURA, + CODEC_ID_AURA2, + CODEC_ID_V210X, + CODEC_ID_TMV, + CODEC_ID_V210, + CODEC_ID_DPX, + CODEC_ID_MAD, + CODEC_ID_FRWU, + CODEC_ID_FLASHSV2, + CODEC_ID_CDGRAPHICS, + CODEC_ID_R210, + CODEC_ID_ANM, + CODEC_ID_BINKVIDEO, + CODEC_ID_IFF_ILBM, + CODEC_ID_IFF_BYTERUN1, + CODEC_ID_KGV1, + CODEC_ID_YOP, + CODEC_ID_VP8, + CODEC_ID_PICTOR, + CODEC_ID_ANSI, + CODEC_ID_A64_MULTI, + CODEC_ID_A64_MULTI5, + CODEC_ID_R10K, + CODEC_ID_MXPEG, + CODEC_ID_LAGARITH, + CODEC_ID_PRORES, + CODEC_ID_JV, + CODEC_ID_DFA, + CODEC_ID_WMV3IMAGE, + CODEC_ID_VC1IMAGE, + CODEC_ID_UTVIDEO, + CODEC_ID_BMV_VIDEO, + CODEC_ID_VBLE, + CODEC_ID_DXTORY, + CODEC_ID_V410, + CODEC_ID_XWD, + CODEC_ID_CDXL, + CODEC_ID_XBM, + CODEC_ID_ZEROCODEC, + CODEC_ID_MSS1, + CODEC_ID_MSA1, + CODEC_ID_TSCC2, + CODEC_ID_MTS2, + CODEC_ID_CLLC, + CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + CODEC_ID_EXR = MKBETAG('0','E','X','R'), + CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + CODEC_ID_V308 = MKBETAG('V','3','0','8'), + CODEC_ID_V408 = 
MKBETAG('V','4','0','8'), + CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + CODEC_ID_SANM = MKBETAG('S','A','N','M'), + CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + CODEC_ID_SNOW = AV_CODEC_ID_SNOW, + + /* various PCM "codecs" */ + CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + CODEC_ID_PCM_S16LE = 0x10000, + CODEC_ID_PCM_S16BE, + CODEC_ID_PCM_U16LE, + CODEC_ID_PCM_U16BE, + CODEC_ID_PCM_S8, + CODEC_ID_PCM_U8, + CODEC_ID_PCM_MULAW, + CODEC_ID_PCM_ALAW, + CODEC_ID_PCM_S32LE, + CODEC_ID_PCM_S32BE, + CODEC_ID_PCM_U32LE, + CODEC_ID_PCM_U32BE, + CODEC_ID_PCM_S24LE, + CODEC_ID_PCM_S24BE, + CODEC_ID_PCM_U24LE, + CODEC_ID_PCM_U24BE, + CODEC_ID_PCM_S24DAUD, + CODEC_ID_PCM_ZORK, + CODEC_ID_PCM_S16LE_PLANAR, + CODEC_ID_PCM_DVD, + CODEC_ID_PCM_F32BE, + CODEC_ID_PCM_F32LE, + CODEC_ID_PCM_F64BE, + CODEC_ID_PCM_F64LE, + CODEC_ID_PCM_BLURAY, + CODEC_ID_PCM_LXF, + CODEC_ID_S302M, + CODEC_ID_PCM_S8_PLANAR, + + /* various ADPCM codecs */ + CODEC_ID_ADPCM_IMA_QT = 0x11000, + CODEC_ID_ADPCM_IMA_WAV, + CODEC_ID_ADPCM_IMA_DK3, + CODEC_ID_ADPCM_IMA_DK4, + CODEC_ID_ADPCM_IMA_WS, + CODEC_ID_ADPCM_IMA_SMJPEG, + CODEC_ID_ADPCM_MS, + CODEC_ID_ADPCM_4XM, + CODEC_ID_ADPCM_XA, + CODEC_ID_ADPCM_ADX, + CODEC_ID_ADPCM_EA, + CODEC_ID_ADPCM_G726, + CODEC_ID_ADPCM_CT, + CODEC_ID_ADPCM_SWF, + CODEC_ID_ADPCM_YAMAHA, + CODEC_ID_ADPCM_SBPRO_4, + CODEC_ID_ADPCM_SBPRO_3, + CODEC_ID_ADPCM_SBPRO_2, + CODEC_ID_ADPCM_THP, + CODEC_ID_ADPCM_IMA_AMV, + CODEC_ID_ADPCM_EA_R1, + CODEC_ID_ADPCM_EA_R3, + CODEC_ID_ADPCM_EA_R2, + CODEC_ID_ADPCM_IMA_EA_SEAD, + CODEC_ID_ADPCM_IMA_EA_EACS, + CODEC_ID_ADPCM_EA_XAS, + CODEC_ID_ADPCM_EA_MAXIS_XA, + CODEC_ID_ADPCM_IMA_ISS, + CODEC_ID_ADPCM_G722, + CODEC_ID_ADPCM_IMA_APC, + CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + + /* AMR */ + CODEC_ID_AMR_NB = 0x12000, + CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + CODEC_ID_RA_144 = 0x13000, + CODEC_ID_RA_288, + + /* various DPCM codecs */ + CODEC_ID_ROQ_DPCM = 0x14000, + CODEC_ID_INTERPLAY_DPCM, + CODEC_ID_XAN_DPCM, + CODEC_ID_SOL_DPCM, + + /* audio codecs */ + CODEC_ID_MP2 = 0x15000, + CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + CODEC_ID_AAC, + CODEC_ID_AC3, + CODEC_ID_DTS, + CODEC_ID_VORBIS, + CODEC_ID_DVAUDIO, + CODEC_ID_WMAV1, + CODEC_ID_WMAV2, + CODEC_ID_MACE3, + CODEC_ID_MACE6, + CODEC_ID_VMDAUDIO, + CODEC_ID_FLAC, + CODEC_ID_MP3ADU, + CODEC_ID_MP3ON4, + CODEC_ID_SHORTEN, + CODEC_ID_ALAC, + CODEC_ID_WESTWOOD_SND1, + CODEC_ID_GSM, ///< as in Berlin toast format + CODEC_ID_QDM2, + CODEC_ID_COOK, + CODEC_ID_TRUESPEECH, + CODEC_ID_TTA, + CODEC_ID_SMACKAUDIO, + CODEC_ID_QCELP, + CODEC_ID_WAVPACK, + CODEC_ID_DSICINAUDIO, + CODEC_ID_IMC, + CODEC_ID_MUSEPACK7, + CODEC_ID_MLP, + CODEC_ID_GSM_MS, /* as found in WAV */ + CODEC_ID_ATRAC3, + CODEC_ID_VOXWARE, + CODEC_ID_APE, + CODEC_ID_NELLYMOSER, + CODEC_ID_MUSEPACK8, + CODEC_ID_SPEEX, + CODEC_ID_WMAVOICE, + CODEC_ID_WMAPRO, + CODEC_ID_WMALOSSLESS, + CODEC_ID_ATRAC3P, + CODEC_ID_EAC3, + CODEC_ID_SIPR, + CODEC_ID_MP1, + CODEC_ID_TWINVQ, + CODEC_ID_TRUEHD, + CODEC_ID_MP4ALS, + CODEC_ID_ATRAC1, + CODEC_ID_BINKAUDIO_RDFT, + CODEC_ID_BINKAUDIO_DCT, + CODEC_ID_AAC_LATM, + CODEC_ID_QDMC, + CODEC_ID_CELT, + CODEC_ID_G723_1, + CODEC_ID_G729, + CODEC_ID_8SVX_EXP, + CODEC_ID_8SVX_FIB, + CODEC_ID_BMV_AUDIO, + CODEC_ID_RALF, + CODEC_ID_IAC, + CODEC_ID_ILBC, + CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + CODEC_ID_OPUS = 
MKBETAG('O','P','U','S'), + + /* subtitle codecs */ + CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + CODEC_ID_DVD_SUBTITLE = 0x17000, + CODEC_ID_DVB_SUBTITLE, + CODEC_ID_TEXT, ///< raw UTF-8 text + CODEC_ID_XSUB, + CODEC_ID_SSA, + CODEC_ID_MOV_TEXT, + CODEC_ID_HDMV_PGS_SUBTITLE, + CODEC_ID_DVB_TELETEXT, + CODEC_ID_SRT, + CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + + /* other specific kind of codecs (generally used for attachments) */ + CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + CODEC_ID_TTF = 0x18000, + CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + + CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it + + CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#endif /* AVCODEC_OLD_CODEC_IDS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vaapi.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vaapi.h new file mode 100644 index 0000000..815a27e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vaapi.h @@ -0,0 +1,173 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. + * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. 
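 *
 * A minimal illustrative sketch (not a complete example): the VADisplay and
 * the libva config/context IDs are assumed to have been created beforehand
 * with vaInitialize()/vaCreateConfig()/vaCreateContext(); error handling is
 * omitted.
 *
 * @code
 * struct vaapi_context *vactx = av_mallocz(sizeof(*vactx));
 * vactx->display    = va_display;     // VADisplay from the window system (assumed)
 * vactx->config_id  = va_config_id;   // from vaCreateConfig() (assumed)
 * vactx->context_id = va_context_id;  // from vaCreateContext() (assumed)
 * avctx->hwaccel_context = vactx;     // avctx: the decoder's AVCodecContext (assumed)
 * @endcode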
+ */ +struct vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; + + /** + * VAPictureParameterBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t pic_param_buf_id; + + /** + * VAIQMatrixBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t iq_matrix_buf_id; + + /** + * VABitPlaneBuffer ID (for VC-1 decoding) + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t bitplane_buf_id; + + /** + * Slice parameter/data buffer IDs + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t *slice_buf_ids; + + /** + * Number of effective slice buffer IDs to send to the HW + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int n_slice_buf_ids; + + /** + * Size of pre-allocated slice_buf_ids + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_buf_ids_alloc; + + /** + * Pointer to VASliceParameterBuffers + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + void *slice_params; + + /** + * Size of a VASliceParameterBuffer element + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_param_size; + + /** + * Size of pre-allocated slice_params + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_params_alloc; + + /** + * Number of slices currently filled in + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_count; + + /** + * Pointer to slice data buffer base + * - encoding: unused + * - decoding: Set by libavcodec + */ + const uint8_t *slice_data; + + /** + * Current size of slice data + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t slice_data_size; +}; + +/* @} */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vda.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vda.h new file mode 100644 index 0000000..b3d6399 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vda.h @@ -0,0 +1,162 @@ +/* + * VDA HW acceleration + * + * copyright (c) 2011 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDA_H +#define AVCODEC_VDA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. 
+ */ + +#include + +// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes +// http://openradar.appspot.com/8026390 +#undef __GNUC_STDC_INLINE__ + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "libavcodec/version.h" + +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +/** + * This structure is used to provide the necessary configurations and data + * to the VDA FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct vda_context { + /** + * VDA decoder object. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + VDADecoder decoder; + + /** + * The Core Video pixel buffer that contains the current image data. + * + * encoding: unused + * decoding: Set by libavcodec. Unset by user. + */ + CVPixelBufferRef cv_buffer; + + /** + * Use the hardware decoder in synchronous mode. + * + * encoding: unused + * decoding: Set by user. + */ + int use_sync_decoding; + + /** + * The frame width. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int width; + + /** + * The frame height. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int height; + + /** + * The frame format. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int format; + + /** + * The pixel format for output image buffers. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + OSType cv_pix_fmt_type; + + /** + * The current bitstream buffer. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + uint8_t *priv_bitstream; + + /** + * The current size of the bitstream. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_bitstream_size; + + /** + * The reference size used for fast reallocation. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_allocated_size; + + /** + * Use av_buffer to manage buffer. + * When the flag is set, the CVPixelBuffers returned by the decoder will + * be released automatically, so you have to retain them if necessary. + * Not setting this flag may cause memory leak. + * + * encoding: unused + * decoding: Set by user. + */ + int use_ref_buffer; +}; + +/** Create the video decoder. */ +int ff_vda_create_decoder(struct vda_context *vda_ctx, + uint8_t *extradata, + int extradata_size); + +/** Destroy the video decoder. */ +int ff_vda_destroy_decoder(struct vda_context *vda_ctx); + +/** + * @} + */ + +#endif /* AVCODEC_VDA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vdpau.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vdpau.h new file mode 100644 index 0000000..b1c836c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/vdpau.h @@ -0,0 +1,195 @@ +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
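 *
 * (Illustrative sketch for the struct vda_context declared above; the frame
 * dimensions, the 'avc1' format tag and the AVCodecContext "avctx" are
 * assumptions, and error handling is omitted.)
 *
 * @code
 * struct vda_context *vdactx = av_mallocz(sizeof(*vdactx));
 * vdactx->width             = 1280;                          // example value (assumed)
 * vdactx->height            = 720;                           // example value (assumed)
 * vdactx->format            = 'avc1';                        // H.264 stream (assumed tag)
 * vdactx->cv_pix_fmt_type   = kCVPixelFormatType_422YpCbCr8; // requested output pixel format
 * vdactx->use_sync_decoding = 1;
 * ff_vda_create_decoder(vdactx, avctx->extradata, avctx->extradata_size);
 * avctx->hwaccel_context    = vdactx;
 * @endcode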
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include +#include +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU 1 +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU 1 +#endif + +#if FF_API_BUFS_VDPAU +union AVVDPAUPictureInfo { + VdpPictureInfoH264 h264; + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoVC1 vc1; + VdpPictureInfoMPEG4Part2 mpeg4; +}; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + +#if FF_API_BUFS_VDPAU + /** + * VDPAU picture information + * + * Set by libavcodec. + */ + attribute_deprecated + union AVVDPAUPictureInfo info; + + /** + * Allocated size of the bitstream_buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_allocated; + + /** + * Useful bitstream buffers in the bitstream buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_used; + + /** + * Table of bitstream buffers. + * The user is responsible for freeing this buffer using av_freep(). + * + * Set by libavcodec. + */ + attribute_deprecated + VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +#if FF_API_CAP_VDPAU +/** @brief The videoSurface is used for rendering. */ +#define FF_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * @brief The videoSurface is needed for reference/prediction. + * The codec manipulates this. 
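 *
 * (Illustrative sketch for the AVVDPAUContext described above; the VdpDecoder
 * and VdpDecoderRender handles are assumed to have been obtained through the
 * VDPAU API, and "avctx" is the decoder's AVCodecContext.)
 *
 * @code
 * AVVDPAUContext *hwctx = av_alloc_vdpaucontext();
 * hwctx->decoder = vdp_decoder;         // from VdpDecoderCreate() (assumed)
 * hwctx->render  = vdp_decoder_render;  // from VdpGetProcAddress() (assumed)
 * avctx->hwaccel_context = hwctx;
 * @endcode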
+ */ +#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 + +/** + * @brief This structure is used as a callback between the FFmpeg + * decoder (vd_) and presentation (vo_) module. + * This is used for defining a video frame containing surface, + * picture parameter, bitstream information etc which are passed + * between the FFmpeg decoder and its clients. + */ +struct vdpau_render_state { + VdpVideoSurface surface; ///< Used as rendered surface, never changed. + + int state; ///< Holds FF_VDPAU_STATE_* values. + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif + + /** Describe size/location of the compressed video data. + Set to 0 when freeing bitstream_buffers. */ + int bitstream_buffers_allocated; + int bitstream_buffers_used; + /** The user is responsible for freeing this buffer using av_freep(). */ + VdpBitstreamBuffer *bitstream_buffers; + +#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif +}; +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/version.h new file mode 100644 index 0000000..2bbd77b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/version.h @@ -0,0 +1,104 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "avutil.h" + +#define LIBAVCODEC_VERSION_MAJOR 55 +#define LIBAVCODEC_VERSION_MINOR 39 +#define LIBAVCODEC_VERSION_MICRO 101 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ */ + +#ifndef FF_API_REQUEST_CHANNELS +#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ALLOC_CONTEXT +#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_AVCODEC_OPEN +#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_OLD_DECODE_AUDIO +#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_TIMECODE +#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55) +#endif + +#ifndef FF_API_OLD_ENCODE_AUDIO +#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_ENCODE_VIDEO +#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CODEC_ID +#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_AVCODEC_RESAMPLE +#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DEINTERLACE +#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DESTRUCT_PACKET +#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_GET_BUFFER +#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_MISSING_SAMPLE +#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif + +#endif /* AVCODEC_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/xvmc.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/xvmc.h new file mode 100644 index 0000000..b2bf518 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavcodec/xvmc.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include + +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. + - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). 
+ A successful ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/avdevice.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/avdevice.h new file mode 100644 index 0000000..93a044f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/avdevice.h @@ -0,0 +1,69 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_AVDEVICE_H +#define AVDEVICE_AVDEVICE_H + +#include "version.h" + +/** + * @file + * @ingroup lavd + * Main libavdevice API header + */ + +/** + * @defgroup lavd Special devices muxing/demuxing library + * @{ + * Libavdevice is a complementary library to @ref libavf "libavformat". It + * provides various "special" platform-specific muxers and demuxers, e.g. for + * grabbing devices, audio capture and playback etc. As a consequence, the + * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own + * I/O functions). The filename passed to avformat_open_input() often does not + * refer to an actually existing file, but has some special device-specific + * meaning - e.g. for x11grab it is the display name. + * + * To use libavdevice, simply call avdevice_register_all() to register all + * compiled muxers and demuxers. They all use standard libavformat API. + * @} + */ + +#include "libavformat/avformat.h" + +/** + * Return the LIBAVDEVICE_VERSION_INT constant. + */ +unsigned avdevice_version(void); + +/** + * Return the libavdevice build-time configuration. + */ +const char *avdevice_configuration(void); + +/** + * Return the libavdevice license. + */ +const char *avdevice_license(void); + +/** + * Initialize libavdevice and register all the input and output devices. + * @warning This function is not thread safe. + */ +void avdevice_register_all(void); + +#endif /* AVDEVICE_AVDEVICE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/version.h new file mode 100644 index 0000000..4f4ec99 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavdevice/version.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
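 *
 * (Illustrative sketch of the libavdevice bootstrap described in avdevice.h
 * above; the "x11grab" device and the ":0.0" display name follow the example
 * mentioned there, and error handling is omitted.)
 *
 * @code
 * avdevice_register_all();
 * AVInputFormat   *grab    = av_find_input_format("x11grab");
 * AVFormatContext *fmt_ctx = NULL;
 * avformat_open_input(&fmt_ctx, ":0.0", grab, NULL);  // the "filename" is the display name
 * @endcode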
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_VERSION_H +#define AVDEVICE_VERSION_H + +/** + * @file + * @ingroup lavd + * Libavdevice version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVDEVICE_VERSION_MAJOR 55 +#define LIBAVDEVICE_VERSION_MINOR 5 +#define LIBAVDEVICE_VERSION_MICRO 100 + +#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT + +#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#endif /* AVDEVICE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/asrc_abuffer.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/asrc_abuffer.h new file mode 100644 index 0000000..aa34461 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/asrc_abuffer.h @@ -0,0 +1,91 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_ASRC_ABUFFER_H +#define AVFILTER_ASRC_ABUFFER_H + +#include "avfilter.h" + +/** + * @file + * memory buffer source for audio + * + * @deprecated use buffersrc.h instead. + */ + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param data pointers to the samples planes + * @param linesize linesizes of each audio buffer plane + * @param nb_samples number of samples per channel + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param planar flag to indicate if audio data is planar or packed + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. 
+ */ +attribute_deprecated +int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * This is similar to av_asrc_buffer_add_samples(), but the samples + * are stored in a buffer with known size. + * + * @param abuffersrc audio source buffer context + * @param buf pointer to the samples data, packed is assumed + * @param size the size in bytes of the buffer, it must contain an + * integer number of samples + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc, + uint8_t *buf, int buf_size, + int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param samplesref buffer ref to queue + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc, + AVFilterBufferRef *samplesref, + int av_unused flags); + +#endif /* AVFILTER_ASRC_ABUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avcodec.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avcodec.h new file mode 100644 index 0000000..8bbdad2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avcodec.h @@ -0,0 +1,110 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVCODEC_H +#define AVFILTER_AVCODEC_H + +/** + * @file + * libavcodec/libavfilter gluing utilities + * + * This should be included in an application ONLY if the installed + * libavfilter has been compiled with libavcodec support, otherwise + * symbols defined below will not be available. + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms); + + +/** + * Create and return a picref reference from the data and properties + * contained in frame. 
+ * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms); + +/** + * Create and return a buffer reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms); +#endif + +#if FF_API_FILL_FRAME +/** + * Fill an AVFrame with the information stored in samplesref. + * + * @param frame an already allocated AVFrame + * @param samplesref an audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref); + +/** + * Fill an AVFrame with the information stored in picref. + * + * @param frame an already allocated AVFrame + * @param picref a video buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref); + +/** + * Fill an AVFrame with information stored in ref. + * + * @param frame an already allocated AVFrame + * @param ref a video or audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref); +#endif + +#endif /* AVFILTER_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfilter.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfilter.h new file mode 100644 index 0000000..412b123 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfilter.h @@ -0,0 +1,1520 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi Libavfilter - graph-based frame editing library + * @{ + */ + +#include + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +#if FF_API_AVFILTERBUFFER +/** + * A reference-counted buffer data type used by the filter system. Filters + * should not store pointers to this structure directly, but instead use the + * AVFilterBufferRef structure below. + */ +typedef struct AVFilterBuffer { + uint8_t *data[8]; ///< buffer data for each plane/channel + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + /** private data to be used by a custom free function */ + void *priv; + /** + * A pointer to the function to deallocate this buffer if the default + * function is not sufficient. This could, for example, add the memory + * back into a memory pool to be reused later without the overhead of + * reallocating it from scratch. + */ + void (*free)(struct AVFilterBuffer *buf); + + int format; ///< media format + int w, h; ///< width and height of the allocated buffer + unsigned refcount; ///< number of references to this buffer +} AVFilterBuffer; + +#define AV_PERM_READ 0x01 ///< can read from the buffer +#define AV_PERM_WRITE 0x02 ///< can write to the buffer +#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer +#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time +#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time +#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes +#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned + +#define AVFILTER_ALIGN 16 //not part of ABI + +/** + * Audio specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, audio specific + * per reference properties must be separated out. 
+ */ +typedef struct AVFilterBufferRefAudioProps { + uint64_t channel_layout; ///< channel layout of audio buffer + int nb_samples; ///< number of audio samples per channel + int sample_rate; ///< audio buffer sample rate + int channels; ///< number of channels (do not access directly) +} AVFilterBufferRefAudioProps; + +/** + * Video specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, video specific + * per reference properties must be separated out. + */ +typedef struct AVFilterBufferRefVideoProps { + int w; ///< image width + int h; ///< image height + AVRational sample_aspect_ratio; ///< sample aspect ratio + int interlaced; ///< is frame interlaced + int top_field_first; ///< field order + enum AVPictureType pict_type; ///< picture type of the frame + int key_frame; ///< 1 -> keyframe, 0-> not + int qp_table_linesize; ///< qp_table stride + int qp_table_size; ///< qp_table size + int8_t *qp_table; ///< array of Quantization Parameters +} AVFilterBufferRefVideoProps; + +/** + * A reference to an AVFilterBuffer. Since filters can manipulate the origin of + * a buffer to, for example, crop image without any memcpy, the buffer origin + * and dimensions are per-reference properties. Linesize is also useful for + * image flipping, frame to field filters, etc, and so is also per-reference. + * + * TODO: add anything necessary for frame reordering + */ +typedef struct AVFilterBufferRef { + AVFilterBuffer *buf; ///< the buffer that this is a reference to + uint8_t *data[8]; ///< picture/audio data for each plane + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + AVFilterBufferRefVideoProps *video; ///< video buffer specific properties + AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties + + /** + * presentation timestamp. The time unit may change during + * filtering, as it is specified in the link and the filter code + * may need to rescale the PTS accordingly. + */ + int64_t pts; + int64_t pos; ///< byte position in stream, -1 if unknown + + int format; ///< media format + + int perms; ///< permissions, see the AV_PERM_* flags + + enum AVMediaType type; ///< media type of buffer data + + AVDictionary *metadata; ///< dictionary containing metadata key=value tags +} AVFilterBufferRef; + +/** + * Copy properties of src to dst, without copying the actual data + */ +attribute_deprecated +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src); + +/** + * Add a new reference to a buffer. + * + * @param ref an existing reference to the buffer + * @param pmask a bitmask containing the allowable permissions in the new + * reference + * @return a new reference to the buffer with the same properties as the + * old, excluding any permissions denied by pmask + */ +attribute_deprecated +AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); + +/** + * Remove a reference to a buffer. 
If this is the last reference to the + * buffer, the buffer itself is also automatically freed. + * + * @param ref reference to the buffer, may be NULL + * + * @note it is recommended to use avfilter_unref_bufferp() instead of this + * function + */ +attribute_deprecated +void avfilter_unref_buffer(AVFilterBufferRef *ref); + +/** + * Remove a reference to a buffer and set the pointer to NULL. + * If this is the last reference to the buffer, the buffer itself + * is also automatically freed. + * + * @param ref pointer to the buffer reference + */ +attribute_deprecated +void avfilter_unref_bufferp(AVFilterBufferRef **ref); +#endif + +/** + * Get the number of channels of a buffer reference. + */ +attribute_deprecated +int avfilter_ref_get_channels(AVFilterBufferRef *ref); + +#if FF_API_AVFILTERPAD_PUBLIC +/** + * A filter pad used for either input or output. + * + * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. + * users should call avfilter_pad_get_name() and avfilter_pad_get_type() + * to access the name and type fields; there should be no need to access + * any other fields from outside of libavfilter. + */ +struct AVFilterPad { + /** + * Pad name. The name is unique among inputs and among outputs, but an + * input may have the same name as an output. This may be NULL if this + * pad has no need to ever be referenced by name. + */ + const char *name; + + /** + * AVFilterPad type. + */ + enum AVMediaType type; + + /** + * Input pads: + * Minimum required permissions on incoming buffers. Any buffer with + * insufficient permissions will be automatically copied by the filter + * system to a new buffer which provides the needed access permissions. + * + * Output pads: + * Guaranteed permissions on outgoing buffers. Any buffer pushed on the + * link must have at least these permissions; this fact is checked by + * asserts. It can be used to optimize buffer allocation. + */ + attribute_deprecated int min_perms; + + /** + * Input pads: + * Permissions which are not accepted on incoming buffers. Any buffer + * which has any of these permissions set will be automatically copied + * by the filter system to a new buffer which does not have those + * permissions. This can be used to easily disallow buffers with + * AV_PERM_REUSE. + * + * Output pads: + * Permissions which are automatically removed on outgoing buffers. It + * can be used to optimize buffer allocation. + */ + attribute_deprecated int rej_perms; + + /** + * @deprecated unused + */ + int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); + + /** + * Callback function to get a video buffer. If NULL, the filter system will + * use ff_default_get_video_buffer(). + * + * Input video pads only. + */ + AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); + + /** + * Callback function to get an audio buffer. If NULL, the filter system will + * use ff_default_get_audio_buffer(). + * + * Input audio pads only. + */ + AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); + + /** + * @deprecated unused + */ + int (*end_frame)(AVFilterLink *link); + + /** + * @deprecated unused + */ + int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); + + /** + * Filtering callback. This is where a filter receives a frame with + * audio/video data and should do its processing. + * + * Input pads only. + * + * @return >= 0 on success, a negative AVERROR on error. 
This function + * must ensure that frame is properly unreferenced on error if it + * hasn't been passed on to another filter. + */ + int (*filter_frame)(AVFilterLink *link, AVFrame *frame); + + /** + * Frame poll callback. This returns the number of immediately available + * samples. It should return a positive value if the next request_frame() + * is guaranteed to return one frame (with no delay). + * + * Defaults to just calling the source poll_frame() method. + * + * Output pads only. + */ + int (*poll_frame)(AVFilterLink *link); + + /** + * Frame request callback. A call to this should result in at least one + * frame being output over the given link. This should return zero on + * success, and another value on error. + * See ff_request_frame() for the error codes with a specific + * meaning. + * + * Output pads only. + */ + int (*request_frame)(AVFilterLink *link); + + /** + * Link configuration callback. + * + * For output pads, this should set the following link properties: + * video: width, height, sample_aspect_ratio, time_base + * audio: sample_rate. + * + * This should NOT set properties such as format, channel_layout, etc which + * are negotiated between filters by the filter system using the + * query_formats() callback before this function is called. + * + * For input pads, this should check the properties of the link, and update + * the filter's internal state as necessary. + * + * For both input and output pads, this should return zero on success, + * and another value on error. + */ + int (*config_props)(AVFilterLink *link); + + /** + * The filter expects a fifo to be inserted on its input link, + * typically because it has a delay. + * + * input pads only. + */ + int needs_fifo; + + int needs_writable; +}; +#endif + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. + */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. 
When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. 
+ * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. + * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). + * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". + */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + /** + * Used by the filter registration system. Must not be touched by any other + * code. + */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
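 *
 * (Illustrative sketch of a filter definition using the public AVFilter and
 * AVFilterPad fields described above; "mynull" and my_filter_frame() are
 * hypothetical, and the callback body is omitted.)
 *
 * @code
 * static int my_filter_frame(AVFilterLink *inlink, AVFrame *frame);
 *
 * static const AVFilterPad my_inputs[] = {
 *     { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = my_filter_frame },
 *     { NULL }   // list terminated by a zeroed element
 * };
 * static const AVFilterPad my_outputs[] = {
 *     { .name = "default", .type = AVMEDIA_TYPE_VIDEO },
 *     { NULL }
 * };
 * AVFilter ff_vf_mynull = {
 *     .name        = "mynull",
 *     .description = "pass frames through unchanged",
 *     .inputs      = my_inputs,
 *     .outputs     = my_outputs,
 * };
 * @endcode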
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs +#endif + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs +#endif + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialzed, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. + */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. 
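+ *
+ * For illustration (an editorial sketch, not from the upstream header), a
+ * single-input filter that does not resample or retime frames could simply
+ * propagate the input time base from its output-pad config callback:
+ * @code
+ * static int config_output(AVFilterLink *outlink)
+ * {
+ *     AVFilterLink *inlink = outlink->src->inputs[0]; // outlink->src is this filter instance
+ *     outlink->time_base = inlink->time_base;         // keep the input timebase unchanged
+ *     return 0;
+ * }
+ * @endcode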
+ */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. + */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + struct AVFilterPool *pool; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown; + * if left to 0/0, will be automatically be copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. + */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * The buffer reference currently being received across the link by the + * destination filter. This is used internally by the filter system to + * allow automatic copying of buffers which do not have sufficient + * permissions for the destination. This should not be accessed directly + * by the filters. + */ + AVFilterBufferRef *cur_buf_copy; + + /** + * True if the link is closed. 
+ * If set, all attempts to call start_frame, filter_frame or request_frame + * will fail with AVERROR_EOF, and if necessary the reference will be + * destroyed. + * If request_frame returns AVERROR_EOF, this flag is set on the + * corresponding link. + * It can also be set by either the source or the destination + * filter. + */ + int closed; + + /** + * Number of channels. + */ + int channels; + + /** + * True if a frame is being requested on the link. + * Used internally by the framework. + */ + unsigned frame_requested; + + /** + * Link processing flags. + */ + unsigned flags; + + /** + * Number of past frames sent through the link. + */ + int64_t frame_count; +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void avfilter_link_free(AVFilterLink **link); + +/** + * Get the number of channels of a link. + */ +int avfilter_link_get_channels(AVFilterLink *link); + +/** + * Set the closed field of a link. + */ +void avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc. of all inputs to a filter. + * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int avfilter_config_links(AVFilterContext *filter); + +#if FF_API_AVFILTERBUFFER +/** + * Create a buffer reference wrapped around an already allocated image + * buffer. + * + * @param data pointers to the planes of the image to reference + * @param linesize linesizes for the planes of the image to reference + * @param perms the required access permissions + * @param w the width of the image specified by the data and linesize arrays + * @param h the height of the image specified by the data and linesize arrays + * @param format the pixel format of the image specified by the data and linesize arrays + */ +attribute_deprecated +AVFilterBufferRef * +avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, + int w, int h, enum AVPixelFormat format); + +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer. + * + * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version + * that can handle unknown channel layouts. + * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channel_layout the channel layout of the buffer + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + uint64_t channel_layout); +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer.
+ * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channels the number of channels of the buffer + * @param channel_layout the channel layout of the buffer, + * must be either 0 or consistent with channels + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout); + +#endif + + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when it's fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** Initialize the filter system. Register all builtin filters. */ +void avfilter_register_all(void); + +#if FF_API_OLD_FILTER_REGISTER +/** Uninitialize the filter system. Unregister all filters. */ +attribute_deprecated +void avfilter_uninit(void); +#endif + +/** + * Register a filter. This is only needed if you plan to use + * avfilter_get_by_name later to look up the AVFilter structure by name. A + * filter can still be instantiated with avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +int avfilter_register(AVFilter *filter); + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +AVFilter *avfilter_get_by_name(const char *name); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +const AVFilter *avfilter_next(const AVFilter *prev); + +#if FF_API_OLD_FILTER_REGISTER +/** + * If filter is NULL, returns a pointer to the first registered filter pointer, + * if filter is non-NULL, returns the next pointer after filter. + * If the returned pointer points to NULL, the last registered filter + * was already reached. + * @deprecated use avfilter_next() + */ +attribute_deprecated +AVFilter **av_filter_next(AVFilter **filter); +#endif + +#if FF_API_AVFILTER_OPEN +/** + * Create a filter instance. + * + * @param filter_ctx put here a pointer to the created filter context + * on success, NULL on failure + * @param filter the filter to create an instance of + * @param inst_name Name to give to the new instance. Can be NULL for none. + * @return >= 0 in case of success, a negative error code otherwise + * @deprecated use avfilter_graph_alloc_filter() instead + */ +attribute_deprecated +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name); +#endif + + +#if FF_API_AVFILTER_INIT_FILTER +/** + * Initialize a filter.
+ * + * @param filter the filter to initialize + * @param args A string of parameters to use when initializing the filter. + * The format and meaning of this string varies by filter. + * @param opaque Any extra non-string data needed by the filter. The meaning + * of this parameter varies by filter. + * @return zero on success + */ +attribute_deprecated +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque); +#endif + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. + * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. + */ +int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. + * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +#if FF_API_AVFILTERBUFFER +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + +/** + * Copy the frame properties and data pointers of src to dst, without copying + * the actual data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); +#endif + +/** + * @return AVClass for AVFilterContext. + * + * @see av_opt_find(). 
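+ *
+ * As an editorial sketch (not part of the upstream header), the returned class
+ * can be fed to the AVOptions API to enumerate the generic options shared by
+ * every filter instance (assumes libavutil/opt.h is included):
+ * @code
+ * const AVClass *cls = avfilter_get_class();
+ * const AVOption *opt = NULL;
+ * while ((opt = av_opt_next(&cls, opt)))
+ *     av_log(NULL, AV_LOG_INFO, "generic filter option: %s\n", opt->name);
+ * @endcode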
+ */ +const AVClass *avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. + * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; +#if FF_API_FOO_COUNT + attribute_deprecated + unsigned filter_count_unused; +#endif + AVFilterContext **filters; +#if !FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters + char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#if FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. + */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. 
+ * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + */ +AVFilterGraph *avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * avfilter_graph_get_filter()) on success or NULL or failure. + */ +AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance with name name from graph. + * + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name); + +#if FF_API_AVFILTER_OPEN +/** + * Add an existing filter instance to a filter graph. + * + * @param graphctx the filter graph + * @param filter the filter to be added + * + * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a + * filter graph + */ +attribute_deprecated +int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter); +#endif + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. + */ +void avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. 
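+ *
+ * A minimal editorial sketch (not part of the upstream header), assuming a
+ * graph that already contains a buffer source "src" and a buffer sink "sink":
+ * @code
+ * AVFilterInOut *outputs = avfilter_inout_alloc(); // open output of the existing part
+ * AVFilterInOut *inputs  = avfilter_inout_alloc(); // open input of the existing part
+ * outputs->name       = av_strdup("in");
+ * outputs->filter_ctx = src;
+ * outputs->pad_idx    = 0;
+ * outputs->next       = NULL;
+ * inputs->name        = av_strdup("out");
+ * inputs->filter_ctx  = sink;
+ * inputs->pad_idx     = 0;
+ * inputs->next        = NULL;
+ * if (avfilter_graph_parse_ptr(graph, "scale=640:360", &inputs, &outputs, NULL) < 0 ||
+ *     avfilter_graph_config(graph, NULL) < 0)
+ *     abort();
+ * avfilter_inout_free(&inputs);
+ * avfilter_inout_free(&outputs);
+ * @endcode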
+ * This struct specifies, for each unconnected pad contained in the graph, the + * filter context and the pad index required for establishing a link. + */ +typedef struct AVFilterInOut { + /** unique name for this input/output in the list */ + char *name; + + /** filter context associated with this input/output */ + AVFilterContext *filter_ctx; + + /** index of the filt_ctx pad to use for linking */ + int pad_idx; + + /** next input/output in the list, NULL if this is the last */ + struct AVFilterInOut *next; +} AVFilterInOut; + +/** + * Allocate a single AVFilterInOut entry. + * Must be freed with avfilter_inout_free(). + * @return allocated AVFilterInOut on success, NULL on failure. + */ +AVFilterInOut *avfilter_inout_alloc(void); + +/** + * Free the supplied list of AVFilterInOut and set *inout to NULL. + * If *inout is NULL, do nothing. + */ +void avfilter_inout_free(AVFilterInOut **inout); + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE +/** + * Add a graph described by a string to a graph. + * + * @note The caller must provide the lists of inputs and outputs, + * which therefore must be known before calling the function. + * + * @note The inputs parameter describes inputs of the already existing + * part of the graph; i.e. from the point of view of the newly created + * part, they are outputs. Similarly the outputs parameter describes + * outputs of the already existing filters, which are provided as + * inputs to the parsed filters. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs linked list to the inputs of the graph + * @param outputs linked list to the outputs of the graph + * @return zero on success, a negative AVERROR code on error + */ +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut *inputs, AVFilterInOut *outputs, + void *log_ctx); +#else +/** + * Add a graph described by a string to a graph. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). + * @return non-negative on success, a negative AVERROR code on error + * @deprecated Use avfilter_graph_parse_ptr() instead. + */ +attribute_deprecated +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); +#endif + +/** + * Add a graph described by a string to a graph. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free().
+ * @return non-negative on success, a negative AVERROR code on error + */ +int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using avfilter_inout_free(). + * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. It is to be freed by the + * caller using avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note This function returns the inputs and outputs that are left + * unlinked after parsing the graph and the caller then deals with + * them. + * @note This function makes no reference whatsoever to already + * existing parts of the graph and the inputs parameter will on return + * contain inputs of the newly parsed part of the graph. Analogously + * the outputs parameter will contain outputs of the newly created + * filters. + */ +int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using av_free + */ +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next.
+ * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. + * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfiltergraph.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfiltergraph.h new file mode 100644 index 0000000..b31d581 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/avfiltergraph.h @@ -0,0 +1,28 @@ +/* + * Filter graphs + * copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTERGRAPH_H +#define AVFILTER_AVFILTERGRAPH_H + +#include "avfilter.h" +#include "libavutil/log.h" + +#endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersink.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersink.h new file mode 100644 index 0000000..ce96d08 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersink.h @@ -0,0 +1,186 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Get an audio/video buffer data from buffer_sink and put it in bufref. + * + * This function works with both audio and video buffer sinks. 
+ * + * @param buffer_sink pointer to a buffersink or abuffersink context + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +attribute_deprecated +int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink, + AVFilterBufferRef **bufref, int flags); + +/** + * Get the number of immediately available frames. + */ +attribute_deprecated +int av_buffersink_poll_frame(AVFilterContext *ctx); + +/** + * Get a buffer with filtered data from sink and put it in buf. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. buf + * must be freed by the caller using avfilter_unref_buffer(). + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +attribute_deprecated +int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); + +/** + * Same as av_buffersink_read, but with the ability to specify the number of + * samples read. This function is less efficient than av_buffersink_read(), + * because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. buf + * must be freed by the caller using avfilter_unref_buffer(). buf + * will contain exactly nb_samples audio samples, except at the end + * of stream, when it can contain less than nb_samples. + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @warning do not mix this function with av_buffersink_read(). Use only one or + * the other with a single sink, not both. + */ +attribute_deprecated +int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, + int nb_samples); +#endif + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in case of success, a negative AVERROR code for failure. + */ +int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read the video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * only need to read a video/samples buffer, without fetching it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVBufferSinkParams *av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context.
+ */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * Get the frame rate of the input. + */ +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @warning do not mix this function with av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersrc.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersrc.h new file mode 100644 index 0000000..89613e1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/buffersrc.h @@ -0,0 +1,148 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * Memory buffer source API. + */ + +#include "libavcodec/avcodec.h" +#include "avfilter.h" + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + +#if FF_API_AVFILTERBUFFER + /** + * Ignored + */ + AV_BUFFERSRC_FLAG_NO_COPY = 2, +#endif + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame is reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Add buffer data in picref to buffer_src. + * + * @param buffer_src pointer to a buffer source context + * @param picref a buffer reference, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_ref(AVFilterContext *buffer_src, + AVFilterBufferRef *picref, int flags); + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +#if FF_API_AVFILTERBUFFER +/** + * Add a buffer to the filtergraph s. + * + * @param buf buffer containing frame data to be passed down the filtergraph. + * This function will take ownership of buf, the user must not free it. + * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. + * + * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() + */ +attribute_deprecated +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +#endif + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and av_buffersrc_write_frame() is + * that av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source.
+ * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controlled + * using the flags. + * + * If this function returns an error, the input frame is not touched. + * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/version.h new file mode 100644 index 0000000..541b869 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavfilter/version.h @@ -0,0 +1,89 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFILTER_VERSION_MAJOR 3 +#define LIBAVFILTER_VERSION_MINOR 90 +#define LIBAVFILTER_VERSION_MICRO 100 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time.
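+ *
+ * Editorial illustration (not from the upstream header): applications can key
+ * conditional compilation off the version macros above, e.g.
+ * @code
+ * #if LIBAVFILTER_VERSION_INT < AV_VERSION_INT(4, 0, 0)
+ * // fall back to an API that is scheduled for removal at the next major bump
+ * #endif
+ * @endcode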
+ */ + +#ifndef FF_API_AVFILTERPAD_PUBLIC +#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FOO_COUNT +#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FILL_FRAME +#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_BUFFERSRC_BUFFER +#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTERBUFFER +#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_OPTS +#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_ACONVERT_FILTER +#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_OPEN +#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_INIT_FILTER +#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_REGISTER +#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_GRAPH_PARSE +#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_DRAWTEXT_OLD_TIMELINE +#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avformat.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avformat.h new file mode 100644 index 0000000..4e5683c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avformat.h @@ -0,0 +1,2239 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf I/O and Muxing/Demuxing Library + * @{ + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the av_iformat_next() / + * av_oformat_next() functions. 
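+ * For instance (an editorial sketch, not part of the upstream header), the
+ * registered demuxers can be listed like this:
+ * @code
+ * AVInputFormat *ifmt = NULL;
+ * av_register_all();
+ * while ((ifmt = av_iformat_next(ifmt)))
+ *     printf("demuxer: %s\n", ifmt->name);
+ * @endcode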
The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with av_malloc(). To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). + * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * Lavf allows to configure muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL or filename, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). 
One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. + * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * av_dict_set(&options, "video_size", "640x480", 0); + * av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. This packet may be passed straight into the libavcodec + * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or + * avcodec_decode_subtitle2() if the caller wishes to decode the data. + * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with av_free_packet() when it is no + * longer needed. 
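+ *
+ * A typical read loop therefore looks roughly like this (editorial sketch, not
+ * part of the upstream header; s is an opened AVFormatContext and
+ * video_stream_index a stream index picked by the caller):
+ * @code
+ * AVPacket pkt;
+ * while (av_read_frame(s, &pkt) >= 0) {
+ *     if (pkt.stream_index == video_stream_index) {
+ *         // hand pkt to the decoder, e.g. avcodec_decode_video2()
+ *     }
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode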
+ *
+ * @section lavf_decoding_seek Seeking
+ * @}
+ *
+ * @defgroup lavf_encoding Muxing
+ * @{
+ * @}
+ *
+ * @defgroup lavf_io I/O Read/Write
+ * @{
+ * @}
+ *
+ * @defgroup lavf_codec Demuxers
+ * @{
+ * @defgroup lavf_codec_native Native Demuxers
+ * @{
+ * @}
+ * @defgroup lavf_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @}
+ * @defgroup lavf_protos I/O Protocols
+ * @{
+ * @}
+ * @defgroup lavf_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+#include <time.h>
+#include <stdio.h>  /* FILE */
+#include "libavcodec/avcodec.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "avio.h"
+#include "libavformat/version.h"
+
+struct AVFormatContext;
+
+
+/**
+ * @defgroup metadata_api Public Metadata API
+ * @{
+ * @ingroup libavf
+ * The metadata API allows libavformat to export metadata tags to a client
+ * application when demuxing. Conversely, it allows a client application to
+ * set metadata when muxing.
+ *
+ * Metadata is exported or set as pairs of key/value strings in the 'metadata'
+ * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs
+ * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg,
+ * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata
+ * exported by demuxers isn't checked to be valid UTF-8 in most cases.
+ *
+ * Important concepts to keep in mind:
+ * - Keys are unique; there can never be 2 tags with the same key. This is
+ *   also meant semantically, i.e., a demuxer should not knowingly produce
+ *   several keys that are literally different but semantically identical.
+ *   E.g., key=Author5, key=Author6. In this example, all authors must be
+ *   placed in the same tag.
+ * - Metadata is flat, not hierarchical; there are no subtags. If you
+ *   want to store, e.g., the email address of the child of producer Alice
+ *   and actor Bob, that could have key=alice_and_bobs_childs_email_address.
+ * - Several modifiers can be applied to the tag name. This is done by
+ *   appending a dash character ('-') and the modifier name in the order
+ *   they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.
+ *   - language -- a tag whose value is localized for a particular language
+ *     is appended with the ISO 639-2/B 3-letter language code.
+ *     For example: Author-ger=Michael, Author-eng=Mike
+ *     The original/default language is in the unqualified "Author" tag.
+ *     A demuxer should set a default if it sets any translated tag.
+ *   - sorting -- a modified version of a tag that should be used for
+ *     sorting will have '-sort' appended. E.g. artist="The Beatles",
+ *     artist-sort="Beatles, The".
+ *
+ * - Demuxers attempt to export metadata in a generic format, however tags
+ *   with no generic equivalents are left as they are stored in the container.
+ *   A list of generic tag names follows:
+ *
+ @verbatim
+ album        -- name of the set this work belongs to
+ album_artist -- main creator of the set/album, if different from artist.
+                 e.g. "Various Artists" for compilation albums.
+ artist       -- main creator of the work
+ comment      -- any additional description of the file.
+ composer     -- who composed the work, if different from artist.
+ copyright    -- name of copyright holder.
+ creation_time-- date when the file was created, preferably in ISO 8601.
+ date         -- date when the work was created, preferably in ISO 8601.
+ disc         -- number of a subset, e.g. disc in a multi-disc collection.
+ encoder      -- name/settings of the software/hardware that produced the file.
+ encoded_by   -- person/group who created the file.
+ filename -- original name of the file. + genre -- . + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. + * + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct AVFrac { + int64_t val, num, den; +} AVFrac; + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ +#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for + raw picture data. */ +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. 
Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ +#if LIBAVFORMAT_VERSION_MAJOR <= 54 +#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks +#else +#define AVFMT_TS_NONSTRICT 0x20000 +#endif + /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in av_write_frame and + av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. 
+ */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. + * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. Only used in raw format right + * now. 'avformat_new_stream' should be called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. 
+ * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. + * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. + */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The single packet associated with it will be returned + * among the first few packets read from the file unless seeking takes place. 
+ * It can also be accessed at any time in AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. + */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; + /** + * Codec context associated with this stream. Allocated and freed by + * libavformat. + * + * - decoding: The demuxer exports codec information stored in the headers + * here. + * - encoding: The user sets codec information, the muxer writes it to the + * output. Mandatory fields as specified in AVCodecContext + * documentation must be set even if this AVCodecContext is + * not actually used for encoding. + */ + AVCodecContext *codec; + void *priv_data; + + /** + * encoding: pts generation when outputting stream + */ + struct AVFrac pts; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: set by libavformat in avformat_write_header. The muxer may use the + * user-provided value of @ref AVCodecContext.time_base "codec->time_base" + * as a hint. + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. + + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Stream information used internally by av_find_stream_info() + */ +#define MAX_STD_TIMEBASES (60*12+6) + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t reference_dts; + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ +#define MAX_PROBE_PACKETS 2500 + int probe_packets; + + /** + * Number of frames that have been demuxed during av_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Real base framerate of the stream. + * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + * + * Code outside avformat should access this field using: + * av_stream_get/set_r_frame_rate(stream) + */ + AVRational r_frame_rate; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. + * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. 
+ */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. + */ + int pts_wrap_behavior; + +} AVStream; + +AVRational av_stream_get_r_frame_rate(const AVStream *s); +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. + */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. + */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + */ +typedef struct AVFormatContext { + /** + * A class for logging and AVOptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * Can only be iformat or oformat, not both at the same time. + * + * decoding: set by avformat_open_input(). + * encoding: set by the user. + */ + struct AVInputFormat *iformat; + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. 
+ */ + void *priv_data; + + /** + * I/O context. + * + * decoding: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * encoding: set by the user. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ + + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * decoding: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * encoding: streams are created by the user before avformat_write_header(). + */ + unsigned int nb_streams; + AVStream **streams; + + char filename[1024]; /**< input or output filename */ + + /** + * Decoding: position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + */ + int64_t duration; + + /** + * Decoding: total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int bit_rate; + + unsigned int packet_size; + int max_delay; + + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. + + /** + * decoding: size of data to probe; encoding: unused. + */ + unsigned int probesize; + + /** + * decoding: maximum time (in AV_TIME_BASE units) during which the input should + * be analyzed in avformat_find_stream_info(). 
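+ *
+ * For example, a caller that prefers faster startup over exhaustive stream
+ * analysis might lower this limit (and probesize) through the options
+ * dictionary before opening the input; a rough sketch, assuming the usual
+ * lavf option names "analyzeduration" and "probesize" and arbitrary values:
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "analyzeduration", "1000000", 0); // 1 second, in AV_TIME_BASE units
+ * av_dict_set(&opts, "probesize", "500000", 0);        // bytes to probe
+ * avformat_open_input(&s, url, NULL, &opts);
+ * av_dict_free(&opts);
+ * @endcode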
+ */ + int max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * muxing : unused + * demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * muxing : set by user + * demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the unix epoch (00:00 1st January 1970). That is, pts=0 + * in the stream was captured at this real world time. + * - encoding: Set by user. + * - decoding: Unused. + */ + int64_t start_time_realtime; + + /** + * decoding: number of frames used to probe fps + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * decoding: set by the user before avformat_open_input(). + * encoding: set by the user before avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
+ * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int use_wallclock_as_timestamps; + + /** + * Avoid negative timestamps during muxing. + * 0 -> allow negative timestamps + * 1 -> avoid negative timestamps + * -1 -> choose automatically (default) + * Note, this only works when interleave_packet_per_dts is in use. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int avoid_negative_ts; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user via AVOptions (NO direct access) + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + unsigned int skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + */ + int probe_score; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. + */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. 
+ * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. + * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + */ + AVCodec *subtitle_codec; +} AVFormatContext; + +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. + * + * @see av_register_input_format() + * @see av_register_output_format() + */ +void av_register_all(void); + +void av_register_input_format(AVInputFormat *format); +void av_register_output_format(AVOutputFormat *format); + +/** + * Do global initialization of network components. This is optional, + * but recommended, since it avoids the overhead of implicitly + * doing the setup for each session. + * + * Calling this function will become mandatory if using network + * protocols at some major version bump. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. + */ +int avformat_network_deinit(void); + +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. 
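+ *
+ * For example, after av_register_all() the complete list of demuxers can be
+ * walked as follows (a sketch):
+ * @code
+ * AVInputFormat *ifmt = NULL;
+ * while ((ifmt = av_iformat_next(ifmt)))
+ *     printf("%s\n", ifmt->name); // comma-separated short names of the demuxer
+ * @endcode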
+ */ +AVInputFormat *av_iformat_next(AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +AVOutputFormat *av_oformat_next(AVOutputFormat *f); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +#if FF_API_ALLOC_OUTPUT_CONTEXT +/** + * @deprecated deprecated in favor of avformat_alloc_output_context2() + */ +attribute_deprecated +AVFormatContext *avformat_alloc_output_context(const char *format, + AVOutputFormat *oformat, + const char *filename); +#endif + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. + * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + */ +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. 
+ * @param score_max A probe score larger that this is required to accept a + * detection, the variable is set to the actual detection + * score afterwards. + * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended + * to retry with a larger probe buffer. + */ +AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_ret The score of the best detection. + */ +AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); + +/** + * Probe a bytestream to determine the input format. Each time a probe returns + * with a score that is too low, the probe buffer size is increased and another + * attempt is made. When the maximum probe size is reached, the input format + * with the highest score is returned. + * + * @param pb the bytestream to probe + * @param fmt the input format is put here + * @param filename the filename of the stream + * @param logctx the log context + * @param offset the offset within the bytestream to probe from + * @param max_probe_size the maximum probe buffer size (zero for default) + * @return the score in case of success, a negative value corresponding to an + * the maximal score is AVPROBE_SCORE_MAX + * AVERROR code otherwise + */ +int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, + const char *filename, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Like av_probe_input_buffer2() but returns 0 on success + */ +int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, + const char *filename, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Open an input stream and read the header. The codecs are not opened. + * The stream must be closed with avformat_close_input(). + * + * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). + * May be a pointer to NULL, in which case an AVFormatContext is allocated by this + * function and written into ps. + * Note that a user-supplied AVFormatContext will be freed on failure. + * @param filename Name of the stream to open. + * @param fmt If non-NULL, this parameter forces a specific input format. + * Otherwise the format is autodetected. + * @param options A dictionary filled with AVFormatContext and demuxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. + */ +int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +#if FF_API_FORMAT_PARAMETERS +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @return >=0 if OK, AVERROR_xxx on error + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. 
+ * + * @deprecated use avformat_find_stream_info. + */ +attribute_deprecated +int av_find_stream_info(AVFormatContext *ic); +#endif + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. + */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +#if FF_API_READ_PACKET +/** + * @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw + * unprocessed packets + * + * Read a transport packet from a media file. + * + * This function is obsolete and should never be used. + * Use av_read_frame() instead. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +attribute_deprecated +int av_read_packet(AVFormatContext *s, AVPacket *pkt); +#endif + +/** + * Return the next frame of a stream. 
+ * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. + * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * av_free_packet when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. + */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). 
+ * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +#if FF_API_CLOSE_INPUT_FILE +/** + * @deprecated use avformat_close_input() + * Close a media file (but not its codecs). + * + * @param s media file handle + */ +attribute_deprecated +void av_close_input_file(AVFormatContext *s); +#endif + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. + */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#if FF_API_NEW_STREAM +/** + * Add a new stream to a media file. + * + * Can only be called in the read_header() function. If the flag + * AVFMTCTX_NOHEADER is in the format context, then new streams + * can be added in read_packet too. + * + * @param s media file handle + * @param id file-format-dependent stream ID + */ +attribute_deprecated +AVStream *av_new_stream(AVFormatContext *s, int id); +#endif + +#if FF_API_SET_PTS_INFO +/** + * @deprecated this function is not supposed to be called outside of lavf + */ +attribute_deprecated +void av_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); +#endif + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number + +/** + * @addtogroup lavf_encoding + * @{ + */ +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. + */ +int avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * The packet shall contain one audio or video frame. + * The packet must be correctly interleaved according to the container + * specification, if not then av_interleaved_write_frame must be used. + * + * @param s media file handle + * @param pkt The packet, which contains the stream_index, buf/buf_size, + * dts/pts, ... + * This can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, + * for muxers that buffer up data internally before writing it + * to the output. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + */ +int av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * The packet must contain one audio or video frame. + * If the packets are already correctly interleaved, the application should + * call av_write_frame() instead as it is slightly faster. It is also important + * to keep in mind that completely non-interleaved input will need huge amounts + * of memory to interleave with this, so it is preferable to interleave at the + * demuxer level. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. 
pkt->buf must be set + * to a valid AVBufferRef describing the packet data. Libavformat takes + * ownership of this reference and will unref it when it sees fit. The caller + * must not access the data through this reference after this function returns. + * This can be NULL (at any time, not just at the end), to flush the + * interleaving queues. + * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the + * index of the corresponding stream in @ref AVFormatContext.streams + * "s.streams". + * It is very strongly recommended that timing information (@ref AVPacket.pts + * "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to + * correct values. + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. + */ +enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, + const char *filename, const char *mime_type, + enum AVMediaType type); + +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. + * @param s media file handle + * @param stream stream in the media file + * @param[out] dts DTS of the last packet output for the stream, in stream + * time_base units + * @param[out] wall absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. + */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. 
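
Continuing the muxing flow documented above, a condensed sketch of the packet-writing loop: timestamps are rescaled into the output stream's time_base, av_interleaved_write_frame() takes ownership of the packet data, and av_write_trailer() finalizes the file. The get_next_packet() helper is hypothetical.

    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>

    /* Hypothetical source of encoded packets with timestamps in src_tb units. */
    int get_next_packet(AVPacket *pkt, AVRational *src_tb);

    static int mux_packets(AVFormatContext *oc, int stream_index)
    {
        AVStream *st = oc->streams[stream_index];
        AVPacket pkt;
        AVRational src_tb;
        int ret = 0;

        while (get_next_packet(&pkt, &src_tb) > 0) {
            /* Rescale pts/dts/duration into the muxer's time base. */
            pkt.pts      = av_rescale_q(pkt.pts, src_tb, st->time_base);
            pkt.dts      = av_rescale_q(pkt.dts, src_tb, st->time_base);
            pkt.duration = (int)av_rescale_q(pkt.duration, src_tb, st->time_base);
            pkt.stream_index = stream_index;

            /* The muxer takes ownership of the packet's data reference. */
            ret = av_interleaved_write_frame(oc, &pkt);
            if (ret < 0)
                break;
        }
        if (ret >= 0)
            ret = av_write_trailer(oc);
        return ret;
    }
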
+ * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, + AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. + * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. 
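
A small sketch using av_codec_get_tag2(), declared above, to check whether an output format's tag table can represent a given codec ID; treating an empty tag table as "anything goes" is a simplifying assumption for this example.

    #include <libavformat/avformat.h>

    static int can_tag_codec(AVFormatContext *oc, enum AVCodecID id)
    {
        unsigned int tag = 0;

        if (!oc->oformat->codec_tag)      /* format defines no tag table */
            return 1;
        if (av_codec_get_tag2(oc->oformat->codec_tag, id, &tag) > 0)
            return 1;                     /* a matching tag was found */
        return 0;
    }
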
+ * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +void av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. + * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param extensions a comma-separated list of filename extensions + */ +int av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. 
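
A brief sketch of av_url_split(), declared below this comment block, splitting a hypothetical RTSP URL into its components. Per the documented behaviour, components that are absent come back as empty strings and a missing port as a negative value.

    #include <stdio.h>
    #include <libavformat/avformat.h>

    static void split_example(void)
    {
        char proto[16], auth[64], host[128], path[256];
        int port;

        av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                     host, sizeof(host), &port,
                     path, sizeof(path),
                     "rtsp://user:pass@media.example.com:554/stream1");

        printf("proto=%s host=%s port=%d path=%s\n", proto, host, port, path);
    }
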
+ */ +const struct AVCodecTag *avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. + * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avio.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avio.h new file mode 100644 index 0000000..4f4ac3c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/avio.h @@ -0,0 +1,481 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
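
A minimal sketch combining av_guess_frame_rate() and av_guess_sample_aspect_ratio(), declared above, to pick display parameters for a decoded frame; ic, st and frame are assumed to come from a normal demux/decode loop elsewhere.

    #include <libavformat/avformat.h>
    #include <libavutil/frame.h>

    static void report_display_params(AVFormatContext *ic, AVStream *st, AVFrame *frame)
    {
        AVRational fps = av_guess_frame_rate(ic, st, frame);
        AVRational sar = av_guess_sample_aspect_ratio(ic, st, frame);

        av_log(ic, AV_LOG_INFO, "guessed fps %d/%d, SAR %d/%d\n",
               fps.num, fps.den, sar.num, sar.den);
    }
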
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include + +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavformat/version.h" + + +#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ + +/** + * Callback for checking whether to abort blocking functions. + * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. + */ + const AVClass *av_class; + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int must_flush; /**< true if the next seek should flush */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. 
+ */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; +} AVIOContext; + +/* unbuffered I/O */ + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int avio_check(const char *url, int flags); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with av_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with av_malloc() and friends. + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. + */ +AVIOContext *avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +void avio_w8(AVIOContext *s, int b); +void avio_write(AVIOContext *s, const unsigned char *buf, int size); +void avio_wl64(AVIOContext *s, uint64_t val); +void avio_wb64(AVIOContext *s, uint64_t val); +void avio_wl32(AVIOContext *s, unsigned int val); +void avio_wb32(AVIOContext *s, unsigned int val); +void avio_wl24(AVIOContext *s, unsigned int val); +void avio_wb24(AVIOContext *s, unsigned int val); +void avio_wl16(AVIOContext *s, unsigned int val); +void avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. 
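
A condensed sketch of custom buffered I/O with avio_alloc_context(), reading from an in-memory blob; the mem_reader struct and the 4 KB buffer size are illustrative. As required above, the internal buffer is allocated with av_malloc().

    #include <string.h>
    #include <libavformat/avio.h>
    #include <libavutil/mem.h>

    struct mem_reader {           /* hypothetical user state passed via 'opaque' */
        const uint8_t *data;
        size_t size, pos;
    };

    static int mem_read(void *opaque, uint8_t *buf, int buf_size)
    {
        struct mem_reader *r = opaque;
        size_t left = r->size - r->pos;
        int n = buf_size < (int)left ? buf_size : (int)left;

        if (n <= 0)
            return AVERROR_EOF;
        memcpy(buf, r->data + r->pos, n);
        r->pos += n;
        return n;
    }

    static AVIOContext *make_mem_avio(struct mem_reader *r)
    {
        int buf_size = 4096;                       /* a typical cache-page size */
        unsigned char *buf = av_malloc(buf_size);  /* must be av_malloc()'d */

        if (!buf)
            return NULL;
        /* Read-only context: write_flag = 0, no write/seek callbacks. */
        return avio_alloc_context(buf, buf_size, 0, r, mem_read, NULL, NULL);
    }
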
+ */ +int avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @return number of bytes written. + */ +int avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Passing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Oring this flag as into the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int url_feof(AVIOContext *s); + +/** @warning currently size is limited */ +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data to the output s. + * + * Force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + */ +void avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. + * @return number of bytes read or AVERROR + */ +int avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int avio_r8 (AVIOContext *s); +unsigned int avio_rl16(AVIOContext *s); +unsigned int avio_rl24(AVIOContext *s); +unsigned int avio_rl32(AVIOContext *s); +uint64_t avio_rl64(AVIOContext *s); +unsigned int avio_rb16(AVIOContext *s); +unsigned int avio_rb24(AVIOContext *s); +unsigned int avio_rb32(AVIOContext *s); +uint64_t avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. + * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. 
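
A short sketch of the byte-level helpers above: seek to the start of the stream, read a 32-bit big-endian magic number with avio_rb32(), and check url_feof() and the error field; the RIFF comparison is purely illustrative.

    #include <stdio.h>
    #include <libavformat/avio.h>

    static int check_magic(AVIOContext *pb)
    {
        unsigned int magic;

        if (avio_seek(pb, 0, SEEK_SET) < 0)
            return -1;

        magic = avio_rb32(pb);              /* big-endian 32-bit read */
        if (url_feof(pb) || pb->error)
            return -1;

        return magic == MKBETAG('R', 'I', 'F', 'F');  /* illustrative check */
    }
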
+ * @return number of bytes read (is always <= maxlen) + */ +int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to avio_open must be one of the following + * constants, optionally ORed with other flags. + * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_closep + */ +int avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by avio_open(). 
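
A sketch of avio_open2() with an AVIOInterruptCB that aborts a blocking network open after a caller-chosen deadline; the 5-second timeout is an assumption. The deadline object must outlive the returned context, since the protocol keeps the callback.

    #include <time.h>
    #include <libavformat/avio.h>

    struct deadline { time_t abort_at; };   /* hypothetical caller-owned state */

    static int interrupt_cb(void *opaque)
    {
        const struct deadline *d = opaque;
        return time(NULL) >= d->abort_at;   /* non-zero aborts the operation */
    }

    static int open_with_timeout(AVIOContext **pb, const char *url, struct deadline *d)
    {
        AVIOInterruptCB cb = { interrupt_cb, d };

        d->abort_at = time(NULL) + 5;       /* assumed 5-second deadline */
        /* Interrupted calls fail with AVERROR_EXIT, as documented for the callback. */
        return avio_open2(pb, url, AVIO_FLAG_READ, &cb, NULL);
    }
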
+ * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_close + */ +int avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with av_free(). + * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). + * @param pause 1 for pause, 0 for resume + */ +int avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +#endif /* AVFORMAT_AVIO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/version.h new file mode 100644 index 0000000..0028c9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavformat/version.h @@ -0,0 +1,76 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
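
A small sketch of the write-only memory stream declared above: data written through the AVIOContext is collected with avio_close_dyn_buf() and must be released with av_free(). The payload values are arbitrary.

    #include <libavformat/avio.h>
    #include <libavutil/mem.h>

    static int dyn_buf_example(void)
    {
        AVIOContext *pb = NULL;
        uint8_t *data = NULL;
        int size, ret;

        if ((ret = avio_open_dyn_buf(&pb)) < 0)
            return ret;

        avio_write(pb, (const unsigned char *)"hello", 5);
        avio_wb32(pb, 0xCAFEBABE);              /* arbitrary example payload */

        size = avio_close_dyn_buf(pb, &data);   /* returns the number of bytes written */
        /* ... use data[0..size-1]; the buffer carries extra padding beyond 'size' ... */
        av_free(data);
        return size;
    }
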
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFORMAT_VERSION_MAJOR 55 +#define LIBAVFORMAT_VERSION_MINOR 19 +#define LIBAVFORMAT_VERSION_MICRO 104 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_ALLOC_OUTPUT_CONTEXT +#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FORMAT_PARAMETERS +#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_NEW_STREAM +#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_SET_PTS_INFO +#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CLOSE_INPUT_FILE +#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_READ_PACKET +#define FF_API_READ_PACKET (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ASS_SSA +#define FF_API_ASS_SSA (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/avresample.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/avresample.h new file mode 100644 index 0000000..f62e533 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/avresample.h @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_AVRESAMPLE_H +#define AVRESAMPLE_AVRESAMPLE_H + +/** + * @file + * @ingroup lavr + * external API header + */ + +/** + * @defgroup lavr Libavresample + * @{ + * + * Libavresample (lavr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lavr is done through AVAudioResampleContext, which is + * allocated with avresample_alloc_context(). 
It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * AVAudioResampleContext *avr = avresample_alloc_context(); + * av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(avr, "in_sample_rate", 48000, 0); + * av_opt_set_int(avr, "out_sample_rate", 44100, 0); + * av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once the context is initialized, it must be opened with avresample_open(). If + * you need to change the conversion parameters, you must close the context with + * avresample_close(), change the parameters as described above, then reopen it + * again. + * + * The conversion itself is done by repeatedly calling avresample_convert(). + * Note that the samples may get buffered in two places in lavr. The first one + * is the output FIFO, where the samples end up if the output buffer is not + * large enough. The data stored in there may be retrieved at any time with + * avresample_read(). The second place is the resampling delay buffer, + * applicable only when resampling is done. The samples in it require more input + * before they can be processed. Their current amount is returned by + * avresample_get_delay(). At the end of conversion the resampling buffer can be + * flushed by calling avresample_convert() with NULL input. + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_linesize, in_samples; + * + * while (get_input(&input, &in_linesize, &in_samples)) { + * uint8_t *output + * int out_linesize; + * int out_samples = avresample_available(avr) + + * av_rescale_rnd(avresample_get_delay(avr) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, &out_linesize, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = avresample_convert(avr, &output, out_linesize, out_samples, + * input, in_linesize, in_samples); + * handle_output(output, out_linesize, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished and the FIFOs are flushed if required, the + * conversion context and everything associated with it must be freed with + * avresample_free(). + */ + +#include "libavutil/avutil.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavresample/version.h" + +#define AVRESAMPLE_MAX_CHANNELS 32 + +typedef struct AVAudioResampleContext AVAudioResampleContext; + +/** Mixing Coefficient Types */ +enum AVMixCoeffType { + AV_MIX_COEFF_TYPE_Q8, /** 16-bit 8.8 fixed-point */ + AV_MIX_COEFF_TYPE_Q15, /** 32-bit 17.15 fixed-point */ + AV_MIX_COEFF_TYPE_FLT, /** floating-point */ + AV_MIX_COEFF_TYPE_NB, /** Number of coeff types. 
Not part of ABI */ +}; + +/** Resampling Filter Types */ +enum AVResampleFilterType { + AV_RESAMPLE_FILTER_TYPE_CUBIC, /**< Cubic */ + AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + AV_RESAMPLE_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +enum AVResampleDitherMethod { + AV_RESAMPLE_DITHER_NONE, /**< Do not use dithering */ + AV_RESAMPLE_DITHER_RECTANGULAR, /**< Rectangular Dither */ + AV_RESAMPLE_DITHER_TRIANGULAR, /**< Triangular Dither*/ + AV_RESAMPLE_DITHER_TRIANGULAR_HP, /**< Triangular Dither with High Pass */ + AV_RESAMPLE_DITHER_TRIANGULAR_NS, /**< Triangular Dither with Noise Shaping */ + AV_RESAMPLE_DITHER_NB, /**< Number of dither types. Not part of ABI. */ +}; + +/** + * Return the LIBAVRESAMPLE_VERSION_INT constant. + */ +unsigned avresample_version(void); + +/** + * Return the libavresample build-time configuration. + * @return configure string + */ +const char *avresample_configuration(void); + +/** + * Return the libavresample license. + */ +const char *avresample_license(void); + +/** + * Get the AVClass for AVAudioResampleContext. + * + * Can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options + * without allocating a context. + * + * @see av_opt_find(). + * + * @return AVClass for AVAudioResampleContext + */ +const AVClass *avresample_get_class(void); + +/** + * Allocate AVAudioResampleContext and set options. + * + * @return allocated audio resample context, or NULL on failure + */ +AVAudioResampleContext *avresample_alloc_context(void); + +/** + * Initialize AVAudioResampleContext. + * + * @param avr audio resample context + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_open(AVAudioResampleContext *avr); + +/** + * Close AVAudioResampleContext. + * + * This closes the context, but it does not change the parameters. The context + * can be reopened with avresample_open(). It does, however, clear the output + * FIFO and any remaining leftover samples in the resampling delay buffer. If + * there was a custom matrix being used, that is also cleared. + * + * @see avresample_convert() + * @see avresample_set_matrix() + * + * @param avr audio resample context + */ +void avresample_close(AVAudioResampleContext *avr); + +/** + * Free AVAudioResampleContext and associated AVOption values. + * + * This also calls avresample_close() before freeing. + * + * @param avr audio resample context + */ +void avresample_free(AVAudioResampleContext **avr); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libavresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param normalize if 1, coefficients will be normalized to prevent + * overflow. if 0, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. 
dplii) + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, int normalize, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding); + +/** + * Get the current channel mixing matrix. + * + * If no custom matrix has been previously set or the AVAudioResampleContext is + * not open, an error is returned. + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_get_matrix(AVAudioResampleContext *avr, double *matrix, + int stride); + +/** + * Set channel mixing matrix. + * + * Allows for setting a custom mixing matrix, overriding the default matrix + * generated internally during avresample_open(). This function can be called + * anytime on an allocated context, either before or after calling + * avresample_open(), as long as the channel layouts have been set. + * avresample_convert() always uses the current matrix. + * Calling avresample_close() on the context will clear the current matrix. + * + * @see avresample_close() + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_matrix(AVAudioResampleContext *avr, const double *matrix, + int stride); + +/** + * Set a customized input channel mapping. + * + * This function can only be called when the allocated context is not open. + * Also, the input channel layout must have already been set. + * + * Calling avresample_close() on the context will clear the channel mapping. + * + * The map for each input channel specifies the channel index in the source to + * use for that particular channel, or -1 to mute the channel. Source channels + * can be duplicated by using the same index for multiple input channels. + * + * Examples: + * + * Reordering 5.1 AAC order (C,L,R,Ls,Rs,LFE) to FFmpeg order (L,R,C,LFE,Ls,Rs): + * { 1, 2, 0, 5, 3, 4 } + * + * Muting the 3rd channel in 4-channel input: + * { 0, 1, -1, 3 } + * + * Duplicating the left channel of stereo input: + * { 0, 0 } + * + * @param avr audio resample context + * @param channel_map customized input channel mapping + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_channel_mapping(AVAudioResampleContext *avr, + const int *channel_map); + +/** + * Set compensation for resampling. + * + * This can be called anytime after avresample_open(). If resampling is not + * automatically enabled because of a sample rate conversion, the + * "force_resampling" option must have been set to 1 when opening the context + * in order to use resampling compensation. + * + * @param avr audio resample context + * @param sample_delta compensation delta, in samples + * @param compensation_distance compensation distance, in samples + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_compensation(AVAudioResampleContext *avr, int sample_delta, + int compensation_distance); + +/** + * Convert input samples and write them to the output FIFO. 
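
A sketch of overriding the default downmix with avresample_build_matrix() and avresample_set_matrix(), for the 5.1-to-stereo case used in the introduction; the mix levels shown are conventional values and are assumptions here, not mandated by the API.

    #include <libavresample/avresample.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/mathematics.h>   /* provides M_SQRT1_2 if math.h does not */

    static int use_custom_downmix(AVAudioResampleContext *avr)
    {
        double matrix[2 * 6];   /* 2 output rows x 6 input columns, stride = 6 */
        int ret;

        ret = avresample_build_matrix(AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,
                                      M_SQRT1_2,  /* center mix level    */
                                      M_SQRT1_2,  /* surround mix level  */
                                      0.0,        /* drop the LFE channel */
                                      1,          /* normalize coefficients */
                                      matrix,
                                      6,          /* stride: input channels per row */
                                      AV_MATRIX_ENCODING_NONE);
        if (ret < 0)
            return ret;

        /* May be called before or after avresample_open(), per the notes above. */
        return avresample_set_matrix(avr, matrix, 6);
    }
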
+ * + * The upper bound on the number of output samples is given by + * avresample_available() + (avresample_get_delay() + number of input samples) * + * output sample rate / input sample rate. + * + * The output data can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to avresample_read(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. avresample_get_delay() tells the number of remaining + * samples. To get this data as output, call avresample_convert() with NULL + * input. + * + * At the end of the conversion process, there may be data remaining in the + * internal FIFO buffer. avresample_available() tells the number of remaining + * samples. To get this data as output, either call avresample_convert() with + * NULL input or call avresample_read(). + * + * @see avresample_available() + * @see avresample_read() + * @see avresample_get_delay() + * + * @param avr audio resample context + * @param output output data pointers + * @param out_plane_size output plane size, in bytes. + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * output, which could slow down some conversions. + * @param out_samples maximum number of samples that the output buffer can hold + * @param input input data pointers + * @param in_plane_size input plane size, in bytes + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * input, which could slow down some conversions. + * @param in_samples number of input samples to convert + * @return number of samples written to the output buffer, + * not including converted samples added to the internal + * output FIFO + */ +int avresample_convert(AVAudioResampleContext *avr, uint8_t **output, + int out_plane_size, int out_samples, uint8_t **input, + int in_plane_size, int in_samples); + +/** + * Return the number of samples currently in the resampling delay buffer. + * + * When resampling, there may be a delay between the input and output. Any + * unconverted samples in each call are stored internally in a delay buffer. + * This function allows the user to determine the current number of samples in + * the delay buffer, which can be useful for synchronization. + * + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples currently in the resampling delay buffer + */ +int avresample_get_delay(AVAudioResampleContext *avr); + +/** + * Return the number of available samples in the output FIFO. + * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. The samples in the FIFO can be read with avresample_read() or + * avresample_convert(). + * + * @see avresample_read() + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples available for reading + */ +int avresample_available(AVAudioResampleContext *avr); + +/** + * Read samples from the output FIFO. 
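
A sketch of end-of-stream handling with the functions above: flush the resampling delay buffer by converting NULL input, then drain whatever is still queued in the output FIFO with avresample_read(). The interleaved S16 stereo output and the handle_output() sink mirror the introduction's example and are assumptions.

    #include <libavresample/avresample.h>
    #include <libavutil/samplefmt.h>
    #include <libavutil/mem.h>

    /* Hypothetical sink for converted interleaved S16 stereo samples. */
    void handle_output(uint8_t *data, int linesize, int nb_samples);

    static int drain_resampler(AVAudioResampleContext *avr)
    {
        int remaining = avresample_get_delay(avr) + avresample_available(avr);
        uint8_t *out = NULL;
        int out_linesize, got, ret;

        if (remaining <= 0)
            return 0;

        ret = av_samples_alloc(&out, &out_linesize, 2, remaining,
                               AV_SAMPLE_FMT_S16, 0);
        if (ret < 0)
            return ret;

        /* NULL input flushes the resampling delay buffer into the output. */
        got = avresample_convert(avr, &out, out_linesize, remaining, NULL, 0, 0);
        if (got > 0)
            handle_output(out, out_linesize, got);

        /* Anything still buffered in the output FIFO can be read out afterwards. */
        got = avresample_read(avr, &out, remaining);
        if (got > 0)
            handle_output(out, out_linesize, got);

        av_freep(&out);
        return 0;
    }
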
+ * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. This function can be used to read samples from that internal FIFO. + * + * @see avresample_available() + * @see avresample_convert() + * + * @param avr audio resample context + * @param output output data pointers. May be NULL, in which case + * nb_samples of data is discarded from output FIFO. + * @param nb_samples number of samples to read from the FIFO + * @return the number of samples written to output + */ +int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples); + +/** + * @} + */ + +#endif /* AVRESAMPLE_AVRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/version.h new file mode 100644 index 0000000..bd52ca6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavresample/version.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_VERSION_H +#define AVRESAMPLE_VERSION_H + +/** + * @file + * @ingroup lavr + * Libavresample version macros. + */ + +#define LIBAVRESAMPLE_VERSION_MAJOR 1 +#define LIBAVRESAMPLE_VERSION_MINOR 1 +#define LIBAVRESAMPLE_VERSION_MICRO 0 + +#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_VERSION AV_VERSION(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_BUILD LIBAVRESAMPLE_VERSION_INT + +#define LIBAVRESAMPLE_IDENT "Lavr" AV_STRINGIFY(LIBAVRESAMPLE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_RESAMPLE_CLOSE_OPEN +#define FF_API_RESAMPLE_CLOSE_OPEN (LIBAVRESAMPLE_VERSION_MAJOR < 2) +#endif + +#endif /* AVRESAMPLE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/adler32.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/adler32.h new file mode 100644 index 0000000..8c08d2b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/adler32.h @@ -0,0 +1,52 @@ +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler32 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. + * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/aes.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/aes.h new file mode 100644 index 0000000..09efbda --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/aes.h @@ -0,0 +1,65 @@ +/* + * copyright (c) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *av_aes_alloc(void); + +/** + * Initialize an AVAES context. + * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
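
A tiny sketch of incremental checksumming with av_adler32_update(), as described above: feeding two buffers sequentially yields the same result as checksumming their concatenation. The initial value 1 is the conventional Adler-32 seed.

    #include <stdint.h>
    #include <libavutil/adler32.h>

    static unsigned long adler_of_two_parts(const uint8_t *a, unsigned int alen,
                                            const uint8_t *b, unsigned int blen)
    {
        unsigned long sum = 1;                 /* standard Adler-32 starting value */

        sum = av_adler32_update(sum, a, alen); /* checksum of the first buffer   */
        sum = av_adler32_update(sum, b, blen); /* continued over the second one  */
        return sum;                            /* equals the checksum of a then b */
    }
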
+ * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/attributes.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/attributes.h new file mode 100644 index 0000000..8c0e5b2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/attributes.h @@ -0,0 +1,160 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. 
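
A compact sketch of the AES API from aes.h above: allocate a context, initialize it with a 128-bit key for encryption, and encrypt whole 16-byte blocks in CBC mode by passing a non-NULL iv to av_aes_crypt(). Key, IV and block count are caller-supplied placeholders.

    #include <stdint.h>
    #include <libavutil/aes.h>
    #include <libavutil/mem.h>

    static int aes_cbc_encrypt(uint8_t *dst, const uint8_t *src, int nblocks,
                               const uint8_t key[16], uint8_t iv[16])
    {
        struct AVAES *aes = av_aes_alloc();
        int ret;

        if (!aes)
            return -1;

        ret = av_aes_init(aes, key, 128, 0);       /* 0 selects encryption */
        if (ret >= 0)
            av_aes_crypt(aes, dst, src, nblocks,   /* count of 16-byte blocks */
                         iv, 0);                   /* non-NULL iv: CBC mode   */

        av_free(aes);
        return ret;
    }
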
+ */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + + +#if defined(__GNUC__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. + */ +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#ifdef __GNUC__ +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audio_fifo.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audio_fifo.h new file mode 100644 index 0000000..903b8f1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audio_fifo.h @@ -0,0 +1,149 @@ +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. 
+ * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. + * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audioconvert.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audioconvert.h new file mode 100644 index 0000000..300a67c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/audioconvert.h @@ -0,0 +1,6 @@ + +#include "version.h" + +#if FF_API_AUDIOCONVERT +#include "channel_layout.h" +#endif diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avassert.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avassert.h new file mode 100644 index 0000000..41f5e0e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avassert.h @@ -0,0 +1,66 @@ +/* + * copyright (c) 2010 Michael Niedermayer + * + * This file is part of FFmpeg. 
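Putting the AVAudioFifo calls above together: allocate with an initial capacity, write packed samples (the FIFO grows automatically on a full write), then read them back out. AV_SAMPLE_FMT_S16 comes from samplefmt.h, which audio_fifo.h already includes; the sizes here are purely illustrative.

#include <stdint.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/samplefmt.h>

static int fifo_roundtrip(int16_t *in, int16_t *out, int nb_samples)
{
    AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1024);
    void *in_planes[1]  = { in };    /* packed (interleaved) stereo: a single data plane */
    void *out_planes[1] = { out };
    int ret;

    if (!fifo)
        return -1;
    ret = av_audio_fifo_write(fifo, in_planes, nb_samples);
    if (ret >= 0)
        ret = av_audio_fifo_read(fifo, out_planes, av_audio_fifo_size(fifo));
    av_audio_fifo_free(fifo);
    return ret;
}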
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. + */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speedloss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#else +#define av_assert2(cond) ((void)0) +#endif + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avconfig.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avconfig.h new file mode 100644 index 0000000..f6685b7 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avconfig.h @@ -0,0 +1,8 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 1 +#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 +#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avstring.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avstring.h new file mode 100644 index 0000000..438ef79 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avstring.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
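The three assert levels above trade safety against speed: av_assert0() always fires, av_assert1() only when the code is built with ASSERT_LEVEL greater than 0, and av_assert2() is reserved for speed-critical paths. A small sketch:

#include <stdint.h>
#include <libavutil/avassert.h>

static int zero_fill(uint8_t *buf, int size)
{
    av_assert0(buf != NULL);        /* checked in every build */
    av_assert1(size > 0);           /* checked only when ASSERT_LEVEL > 0 */
    for (int i = 0; i < size; i++) {
        av_assert2(i < size);       /* per-iteration check, ASSERT_LEVEL > 1 only */
        buf[i] = 0;
    }
    return size;
}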
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. + */ +size_t av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. 
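One practical point in the av_strlcpy()/av_strlcat() documentation above is that the return value is the length the full string would have had, so truncation can be detected by comparing it against the buffer size. A brief sketch, using av_strlcat() whose declaration follows just below:

#include <stddef.h>
#include <libavutil/avstring.h>

static int build_path(char *dst, size_t dst_size, const char *dir, const char *file)
{
    size_t n;
    av_strlcpy(dst, dir, dst_size);
    av_strlcat(dst, "/", dst_size);
    n = av_strlcat(dst, file, dst_size);
    return n < dst_size ? 0 : -1;   /* non-zero means the result was truncated */
}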
+ */ +size_t av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. + * @return the allocated string + * @note You have to free the string yourself with av_free(). + */ +char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to a av_malloced string. + */ +char *av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for av_strtok() to continue scanning the same + * string. saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +int av_isdigit(int c); + +/** + * Locale-independent conversion of ASCII isgraph. + */ +int av_isgraph(int c); + +/** + * Locale-independent conversion of ASCII isspace. + */ +int av_isspace(int c); + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. 
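The av_strtok() contract above mirrors strtok_r(): pass the string on the first call, NULL afterwards, and let saveptr carry the scanner state between calls. A short tokenizing loop; note the input buffer is assumed to be writable, since the function presumably modifies it in place like strtok_r() does.

#include <stdio.h>
#include <libavutil/avstring.h>

static void print_fields(char *line)        /* e.g. "width=1280;height=720" */
{
    char *saveptr = NULL;
    for (char *tok = av_strtok(line, ";", &saveptr); tok;
         tok = av_strtok(NULL, ";", &saveptr))
        printf("field: %s\n", tok);
}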
+ */ +int av_isxdigit(int c); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strncasecmp(const char *a, const char *b, size_t n); + + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. + * @return pointer to the basename substring. + */ +const char *av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *av_dirname(char *path); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE 0x01 + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by av_get_token(), such as the single quote. + */ +#define AV_ESCAPE_FLAG_STRICT 0x02 + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see av_bprint_escape() + */ +int av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avutil.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avutil.h new file mode 100644 index 0000000..4692c00 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/avutil.h @@ -0,0 +1,320 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * external API header + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref lsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version + * number is incremented with backward incompatible changes - e.g. removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu Common utility functions + * + * @brief + * libavutil contains the code shared across all the other FFmpeg + * libraries + * + * @note In order to use the functions provided by avutil you must include + * the specific header. 
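A common way to apply the compatibility rules above is to compare the library's run-time version with the headers the program was built against: the major numbers must match, while a newer minor or micro at run time is fine. The sketch below assumes LIBAVUTIL_VERSION_INT is visible once avutil.h is included (it normally comes in via version.h) and that the major number sits in the top 16 bits of the packed value, per the usual AV_VERSION_INT(a,b,c) layout; avutil_version() itself is declared further down in this header.

#include <stdio.h>
#include <libavutil/avutil.h>

static int check_libavutil_abi(void)
{
    unsigned built_with = LIBAVUTIL_VERSION_INT;   /* version of the headers we compiled against */
    unsigned running    = avutil_version();        /* version of the library actually loaded */

    if ((running >> 16) != (built_with >> 16)) {   /* compare major numbers only */
        fprintf(stderr, "libavutil ABI mismatch: built with %u, running %u\n",
                built_with, running);
        return -1;
    }
    return 0;
}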
+ * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Maths + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup lavu_internal Internal + * + * Not exported functions, for internal usage only + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. + */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. + */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1< + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. + */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/blowfish.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/blowfish.h new file mode 100644 index 0000000..0b00453 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/blowfish.h @@ -0,0 +1,77 @@ +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. 
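AV_BASE64_SIZE() above gives the exact buffer size av_base64_encode() needs, including the terminating NUL, which makes a fixed upper bound easy when the input size is bounded. A small round-trip sketch:

#include <string.h>
#include <stdint.h>
#include <libavutil/base64.h>

static int base64_roundtrip(const uint8_t *data, int size)
{
    char    enc[AV_BASE64_SIZE(64)];  /* large enough for inputs up to 64 bytes */
    uint8_t dec[64];
    int n;

    if (size > 64 || !av_base64_encode(enc, sizeof(enc), data, size))
        return -1;
    n = av_base64_decode(dec, enc, sizeof(dec));
    return (n == size && !memcmp(dec, data, size)) ? 0 : -1;
}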
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Initialize an AVBlowfish context. + * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bprint.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bprint.h new file mode 100644 index 0000000..839ec1e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bprint.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
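Unlike AVAES, the AVBlowfish structure above is fully defined, so a context can live on the stack and be set up directly with av_blowfish_init(). A sketch encrypting a single 8-byte block through the two-halves ECB call:

#include <stdint.h>
#include <libavutil/blowfish.h>

static void bf_encrypt_block(uint32_t *left, uint32_t *right,
                             const uint8_t *key, int key_len)
{
    AVBlowfish ctx;
    av_blowfish_init(&ctx, key, key_len);          /* key_len is in bytes */
    av_blowfish_crypt_ecb(&ctx, left, right, 0);   /* 0 = encrypt, 1 = decrypt */
}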
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ +#define FF_PAD_STRUCTURE(size, ...) \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct { __VA_ARGS__ })]; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ +typedef struct AVBPrint { + FF_PAD_STRUCTURE(1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; + ) +} AVBPrint; + +/** + * Convenience macros for special values for av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. 
+ * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. + * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. 
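The AVBPrint workflow described above is: init with one of the AV_BPRINT_SIZE_* policies, append freely without checking each call, then test av_bprint_is_complete() once and either take ownership of the string through av_bprint_finalize() or discard it. A compact sketch:

#include <libavutil/bprint.h>

static char *describe_stream(int width, int height, double fps)
{
    AVBPrint bp;
    char *out = NULL;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);  /* grow as needed */
    av_bprintf(&bp, "%dx%d @ %.2f fps", width, height, fps);

    if (!av_bprint_is_complete(&bp)) {                 /* an allocation failed somewhere */
        av_bprint_finalize(&bp, NULL);                 /* discard the truncated buffer */
        return NULL;
    }
    av_bprint_finalize(&bp, &out);                     /* caller later releases out with av_free() */
    return out;
}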
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bswap.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bswap.h new file mode 100644 index 0000000..06f6548 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_BFIN +# include "bfin/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/buffer.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/buffer.h new file mode 100644 index 0000000..b4399fd --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/buffer.h @@ -0,0 +1,274 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- av_buffer_alloc() to just allocate a new buffer, and + * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with av_buffer_ref(). + * Use av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The av_buffer_is_writable() function is + * provided to check whether this is true and av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. 
+ * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. + * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *av_buffer_alloc(int size); + +/** + * Same as av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. + * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls av_free() on the buffer data. + * This function is meant to be passed to av_buffer_create(), not called + * directly. + */ +void av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until av_buffer_ref() is called on buf. + */ +int av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by av_buffer_create. 
+ */ +void *av_buffer_get_opaque(const AVBufferRef *buf); + +int av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. On failure, buf is left untouched. + * @return 0 on success, a negative AVERROR on failure. + */ +int av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with av_realloc() only if it was + * initially allocated through av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to + * get a reference to a new buffer, similar to av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent av_buffer_pool_get() calls. + * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. + * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with av_buffer_pool_init() and freed with + * av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. 
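The reference-counting rules above boil down to: a buffer is writable only while it has a single (non read-only) reference, and av_buffer_make_writable() transparently copies it otherwise. A sketch that takes a second reference and then regains write access:

#include <string.h>
#include <libavutil/buffer.h>

static int touch_buffer(void)
{
    AVBufferRef *a = av_buffer_alloc(4096);
    AVBufferRef *b;
    if (!a)
        return -1;

    b = av_buffer_ref(a);                    /* two refs now exist: neither is writable */
    if (!b || av_buffer_is_writable(a) ||
        av_buffer_make_writable(&a) < 0) {   /* copies the data; a becomes sole owner */
        av_buffer_unref(&a);
        av_buffer_unref(&b);
        return -1;
    }
    memset(a->data, 0, a->size);
    av_buffer_unref(&a);                     /* data is freed once the last ref is gone */
    av_buffer_unref(&b);
    return 0;
}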
Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. It will be set to NULL. + * @see av_buffer_pool_can_uninit() + */ +void av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/channel_layout.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/channel_layout.h new file mode 100644 index 0000000..ba4f96d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/channel_layout.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2006 Michael Niedermayer + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. + * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. 
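A typical lifecycle for the AVBufferPool API above: create the pool once, pull references with av_buffer_pool_get() from any thread, and call av_buffer_pool_uninit() when no new buffers will be needed; outstanding buffers keep the pool alive until they are released. A brief sketch using the default allocator:

#include <libavutil/buffer.h>

static int pool_demo(void)
{
    AVBufferPool *pool = av_buffer_pool_init(64 * 1024, NULL);  /* NULL = default av_buffer_alloc() */
    AVBufferRef *buf;
    if (!pool)
        return -1;

    buf = av_buffer_pool_get(pool);      /* later calls reuse returned buffers instead of reallocating */
    av_buffer_pool_uninit(&pool);        /* pool stays alive while 'buf' is outstanding */
    if (!buf)
        return -1;
    av_buffer_unref(&buf);               /* last reference gone: buffer and marked pool are freed */
    return 0;
}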
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. */ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel convenience macros + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_NB +}; + +/** + * @} + */ + +/** + * Return a channel layout id that matches name, or 0 if no match is found. 
+ * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, optionally followed by 'c', yielding + * the default channel layout for that number of channels (@see + * av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * @warning Starting from the next major bump the trailing character + * 'c' to specify a number of channels will be required, while a + * channel layout mask could also be specified as a decimal number + * (if and only if not followed by "c"). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t av_get_channel_layout(const char *name); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. + * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/common.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/common.h new file mode 100644 index 0000000..a676b1c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/common.h @@ -0,0 +1,464 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
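Tying the channel-layout helpers above together: parse a layout by name, count its channels, and format it back into a readable description. Passing 0 as nb_channels lets av_get_channel_layout_string() derive the count from the mask, as its documentation states; the layout names follow the notations listed for av_get_channel_layout().

#include <stdio.h>
#include <stdint.h>
#include <libavutil/channel_layout.h>

static void describe_layout(const char *name)    /* e.g. "stereo" or "5.1(side)" */
{
    uint64_t layout = av_get_channel_layout(name);
    char desc[128];

    if (!layout) {
        printf("unknown layout: %s\n", name);
        return;
    }
    av_get_channel_layout_string(desc, sizeof(desc), 0, layout);  /* 0: derive count from mask */
    printf("%s -> %d channels (%s)\n", name,
           av_get_channel_layout_nb_channels(layout), desc);
}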
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * common internal and external API header + */ + +#ifndef AVUTIL_COMMON_H +#define AVUTIL_COMMON_H + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <math.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "attributes.h" +#include "version.h" +#include "avconfig.h" + +#if AV_HAVE_BIGENDIAN +# define AV_NE(be, le) (be) +#else +# define AV_NE(be, le) (le) +#endif + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +/* assume a>0 and b>0 */ +#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ + : ((a) + (1<<(b)) - 1) >> (b)) +#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) +#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) +#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) +#define FFSIGN(a) ((a) > 0 ? 1 : -1) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +/* misc math functions */ + +/** + * Reverse the order of the bits of an 8-bits unsigned integer. + */ +#if FF_API_AV_REVERSE +extern attribute_deprecated const uint8_t av_reverse[256]; +#endif + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef av_log2 +av_const int av_log2(unsigned v); +#endif + +#ifndef av_log2_16bit +av_const int av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer to an unsigned power of two range. + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) +{ + if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1); + else return a; +} + +/** + * Add two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return sum with signed saturation + */ +static av_always_inline int av_sat_add32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a + b); +} + +/** + * Add a doubled value to another value with saturation at both stages. + * + * @param a first value + * @param b value doubled and added to a + * @return sum with signed saturation + */ +static av_always_inline int av_sat_dadd32_c(int a, int b) +{ + return av_sat_add32(a, av_sat_add32(b, b)); +} + +/** + * Clip a float value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const float av_clipf_c(float a, float amin, float amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a double value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. + */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= GET_BYTE;\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= GET_BYTE - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. + */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/cpu.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/cpu.h new file mode 100644 index 0000000..55c3ec9 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/cpu.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +// #if LIBAVUTIL_VERSION_MAJOR <52 +#define AV_CPU_FLAG_CMOV 0x1001000 ///< supports cmov instruction +// #else +// #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +// #endif +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) + +/** + * Return the flags which specify extensions supported by the CPU. + * The returned value is affected by av_force_cpu_flags() if that was used + * before. So av_get_cpu_flags() can easily be used in a application to + * detect the enabled cpu flags. + */ +int av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + * + * @warning this function is not thread safe. + */ +attribute_deprecated void av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. 
+ * + * @return negative on error. + */ +int av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int av_cpu_count(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/crc.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/crc.h new file mode 100644 index 0000000..f4219ca --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/crc.h @@ -0,0 +1,85 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include +#include +#include "attributes.h" + +/** + * @defgroup lavu_crc32 CRC32 + * @ingroup lavu_crypto + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_24_IEEE = 12, + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. + * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see av_crc_init() "le" parameter + */ +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/dict.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/dict.h new file mode 100644 index 0000000..38f03a4 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/dict.h @@ -0,0 +1,152 @@ +/* + * + * This file is part of FFmpeg. 
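Since cpu.h above exposes runtime feature detection, a minimal sketch of how a caller might gate an ARM-specific code path (illustrative only, not part of the vendored headers) is:

#include <libavutil/cpu.h>

/* Choose a NEON code path at runtime on ARM builds. */
static int have_neon(void)
{
    return (av_get_cpu_flags() & AV_CPU_FLAG_NEON) != 0;
}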
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. + * + * @code + * AVDictionary *d = NULL; // "create" an empty dictionary + * av_dict_set(&d, "foo", "bar", 0); // add an entry + * + * char *k = av_strdup("key"); // if your strings are already allocated, + * char *v = av_strdup("value"); // you can avoid copying them like this + * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + * + * AVDictionaryEntry *t = NULL; + * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + * <....> // iterate over all entries in d + * } + * + * av_dict_free(&d); + * @endcode + * + */ + +#define AV_DICT_MATCH_CASE 1 +#define AV_DICT_IGNORE_SUFFIX 2 +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() and children. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() and chilren. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param flags Allows case as well as suffix-insensitive comparisons. + * @return Found entry or NULL, changing key or value leads to undefined behavior. + */ +AVDictionaryEntry * +av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. 
+ * + * @param m dictionary + * @return number of entries in dictionary + */ +int av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will be av_strduped depending on flags) + * @param value entry value to add to *pm (will be av_strduped depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Parse the key/value pairs list and add to a dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + */ +void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void av_dict_free(AVDictionary **m); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/error.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/error.h new file mode 100644 index 0000000..f3fd7bb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/error.h @@ -0,0 +1,117 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include +#include + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. 
*/ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. + +#define AV_ERROR_MAX_STRING_SIZE 64 + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. + * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. + * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/eval.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/eval.h new file mode 100644 index 0000000..6159b0f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/eval.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2002 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. + * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with av_expr_free() by the user + * when it is not needed anymore. 
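The error helpers declared in error.h above are typically combined as follows; this is an illustrative caller-side sketch, not part of the vendored headers, and the function name in the comment is only an example of a call site:

#include <stdio.h>
#include <libavutil/error.h>

/* Print a readable message for a negative AVERROR return value. */
static void log_av_error(const char *what, int err)
{
    if (err < 0)
        fprintf(stderr, "%s failed: %s\n", what, av_err2str(err));
}

/* e.g. log_av_error("decode", AVERROR(EINVAL)); */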
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with av_expr_parse(). + */ +void av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value for + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/fifo.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/fifo.h new file mode 100644 index 0000000..849b9a6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/fifo.h @@ -0,0 +1,144 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
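To show how the expression evaluator declared in eval.h is wired up, here is a minimal sketch (illustrative only, not part of the vendored headers; the expression and constant name are arbitrary):

#include <libavutil/eval.h>

/* Evaluate "w/2" with a caller-supplied constant named "w". */
static double half_of(double w)
{
    static const char * const names[] = { "w", NULL };
    double values[] = { w };
    double result   = 0;

    if (av_expr_parse_and_eval(&result, "w/2",
                               names, values,
                               NULL, NULL,          /* no single-argument functions */
                               NULL, NULL,          /* no two-argument functions */
                               NULL, 0, NULL) < 0)  /* opaque, log_offset, log_ctx */
        return 0;
    return result;
}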
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc(unsigned int size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void av_fifo_free(AVFifoBuffer *f); + +/** + * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int av_fifo_size(AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int av_fifo_space(AVFifoBuffer *f); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. 
+ * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/file.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/file.h new file mode 100644 index 0000000..a7364fe --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/file.h @@ -0,0 +1,66 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * The returned buffer must be released with av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by av_file_map() + */ +void av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. + * @return file descriptor of opened file (or -1 on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. 
+ */ +int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/frame.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/frame.h new file mode 100644 index 0000000..9570bfc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/frame.h @@ -0,0 +1,659 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include + +#include "version.h" + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" + +enum AVColorSpace{ + AVCOL_SPC_RGB = 0, + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_FCC = 4, + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above + AVCOL_SPC_SMPTE240M = 7, + AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_NB , ///< Not part of ABI +}; +#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG + +enum AVColorRange{ + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB , ///< Not part of ABI +}; + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, +}; + +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. 
In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * Similarly fields that are marked as to be only accessed by + * av_opt_ptr() can be reordered. This allows 2 forks to add fields + * without breaking compatibility with each other. + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiplies of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + uint8_t *base[AV_NUM_DATA_POINTERS]; +#endif + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + */ + int64_t pkt_pts; + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. 
+ */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int reference; + + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + /** + * mbskip_table[mb]>=1 if MB didn't change + * stride= mb_width = (width+15)>>4 + */ + attribute_deprecated + uint8_t *mbskip_table; + + /** + * motion vector table + * @code + * example: + * int mv_sample_log2= 4 - motion_subsample_log2; + * int mb_width= (width+15)>>4; + * int mv_stride= (mb_width << mv_sample_log2) + 1; + * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; + * @endcode + */ + attribute_deprecated + int16_t (*motion_val[2])[2]; + + /** + * macroblock type table + * mb_type_base + mb_width + 2 + */ + attribute_deprecated + uint32_t *mb_type; + + /** + * DCT coefficients + */ + attribute_deprecated + short *dct_coeff; + + /** + * motion reference frame index + * the order in which these are stored can depend on the codec. + */ + attribute_deprecated + int8_t *ref_index[2]; +#endif + + /** + * for some private data of the user + */ + void *opaque; + + /** + * error + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int type; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int buffer_hints; + + /** + * Pan scan. + */ + attribute_deprecated + struct AVPanScan *pan_scan; +#endif + + /** + * reordered opaque 64bit (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + +#if FF_API_AVFRAME_LAVC + /** + * @deprecated this field is unused + */ + attribute_deprecated void *hwaccel_picture_private; + + attribute_deprecated + struct AVCodecContext *owner; + attribute_deprecated + void *thread_opaque; + + /** + * log2 of the size of the block which a single vector in motion_val represents: + * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) + */ + attribute_deprecated + uint8_t motion_subsample_log2; +#endif + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. 
For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * Code outside libavcodec should access this field using: + * av_frame_get_best_effort_timestamp(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_pos(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_duration(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * Code outside libavcodec should access this field using: + * av_frame_get_metadata(frame) + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * Code outside libavcodec should access this field using: + * av_frame_get_decode_error_flags(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * Code outside libavcodec should access this field using: + * av_frame_get_channels(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. It must be accessed using av_frame_get_pkt_size() and + * av_frame_set_pkt_size(). + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + + /** + * YUV colorspace type. + * It must be accessed using av_frame_get_colorspace() and + * av_frame_set_colorspace(). + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * It must be accessed using av_frame_get_color_range() and + * av_frame_set_color_range(). 
+ * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + + /** + * Not to be accessed directly from outside libavutil + */ + AVBufferRef *qp_table_buf; +} AVFrame; + +/** + * Accessors for some AVFrame fields. + * The position of these field in the structure is not part of the ABI, + * they should not be accessed directly outside libavcodec. + */ +int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); +void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_duration (const AVFrame *frame); +void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_pos (const AVFrame *frame); +void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +int64_t av_frame_get_channel_layout (const AVFrame *frame); +void av_frame_set_channel_layout (AVFrame *frame, int64_t val); +int av_frame_get_channels (const AVFrame *frame); +void av_frame_set_channels (AVFrame *frame, int val); +int av_frame_get_sample_rate (const AVFrame *frame); +void av_frame_set_sample_rate (AVFrame *frame, int val); +AVDictionary *av_frame_get_metadata (const AVFrame *frame); +void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +int av_frame_get_decode_error_flags (const AVFrame *frame); +void av_frame_set_decode_error_flags (AVFrame *frame, int val); +int av_frame_get_pkt_size(const AVFrame *frame); +void av_frame_set_pkt_size(AVFrame *frame, int val); +AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); +int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame); +void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); +enum AVColorRange av_frame_get_color_range(const AVFrame *frame); +void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Setup a new reference to the data described by a given frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. 
+ */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everythnig contained in src to dst and reset src. + */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. + * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +#endif /* AVUTIL_FRAME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/hmac.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/hmac.h new file mode 100644 index 0000000..d36d4de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/hmac.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
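/*
 * Usage sketch (illustrative annotation, not part of the patch hunks): one
 * possible AVFrame lifecycle with the frame.h API above -- allocate the
 * struct, describe the picture, let libavutil allocate the data buffers,
 * then unref and free. The AV_PIX_FMT_YUV420P constant and the angle-bracket
 * include paths are assumptions based on the bundled libavutil headers.
 */
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static int example_alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();        /* struct only, no data buffers yet */
    if (!frame)
        return -1;

    frame->format = AV_PIX_FMT_YUV420P;       /* format, width and height must be */
    frame->width  = 1280;                     /* set before av_frame_get_buffer() */
    frame->height = 720;

    if (av_frame_get_buffer(frame, 32) < 0) { /* fills data[], linesize[], buf[]  */
        av_frame_free(&frame);
        return -1;
    }

    /* ... write pixels into frame->data[] here ... */

    av_frame_unref(frame);                    /* drop the buffer references       */
    av_frame_free(&frame);                    /* frees the struct, sets it NULL   */
    return 0;
}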
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include + +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224 = 10, + AV_HMAC_SHA256, + AV_HMAC_SHA384, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. + * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/imgutils.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/imgutils.h new file mode 100644 index 0000000..ab32d66 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/imgutils.h @@ -0,0 +1,200 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
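/*
 * Usage sketch (illustrative annotation, not part of the patch hunks):
 * computing an HMAC-SHA256 digest in one shot with the hmac.h API above.
 * The 32-byte output length matches SHA-256; the include path is an
 * assumption based on the bundled headers.
 */
#include <stdint.h>
#include <libavutil/hmac.h>

static int example_hmac_sha256(const uint8_t *msg, unsigned int msglen,
                               const uint8_t *key, unsigned int keylen,
                               uint8_t digest[32])
{
    AVHMAC *ctx = av_hmac_alloc(AV_HMAC_SHA256);
    int ret;

    if (!ctx)
        return -1;
    /* av_hmac_calc() performs init + update + final in a single call */
    ret = av_hmac_calc(ctx, msg, msglen, key, keylen, digest, 32);
    av_hmac_free(ctx);
    return ret;    /* number of bytes written, or a negative error code */
}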
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. 
+ * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. + * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param[in] align the assumed linesize alignment + */ +int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. + * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesizes linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. 
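/*
 * Usage sketch (illustrative annotation, not part of the patch hunks):
 * allocating a picture with av_image_alloc() and flattening it into one
 * contiguous buffer with av_image_copy_to_buffer(). The pixel format
 * constant and include paths are assumptions; av_freep() is the free
 * function the imgutils.h documentation above prescribes.
 */
#include <stdint.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static int example_image_to_buffer(int w, int h)
{
    uint8_t *data[4];
    int      linesize[4];
    int      size, ret = -1;

    if (av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 16) < 0)
        return -1;

    size = av_image_get_buffer_size(AV_PIX_FMT_YUV420P, w, h, 1);
    if (size > 0) {
        uint8_t *flat = av_malloc(size);
        if (flat) {
            ret = av_image_copy_to_buffer(flat, size,
                                          (const uint8_t * const *)data,
                                          linesize, AV_PIX_FMT_YUV420P,
                                          w, h, 1);
            av_freep(&flat);
        }
    }
    av_freep(&data[0]);   /* frees the whole image buffer, as documented */
    return ret;
}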
+ * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat_readwrite.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat_readwrite.h new file mode 100644 index 0000000..9709f4d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intfloat_readwrite.h @@ -0,0 +1,40 @@ +/* + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
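/*
 * Usage sketch (illustrative annotation, not part of the patch hunks): the
 * intfloat.h helpers above only reinterpret bits, so a value survives a
 * round trip through its integer representation unchanged.
 */
#include <assert.h>
#include <stdint.h>
#include <libavutil/intfloat.h>

static void example_intfloat_roundtrip(void)
{
    uint32_t bits = av_float2int(1.5f);    /* 0x3FC00000 in IEEE-754 single */
    float    back = av_int2float(bits);
    assert(back == 1.5f);

    uint64_t dbits = av_double2int(0.25);
    assert(av_int2double(dbits) == 0.25);
}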
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_READWRITE_H +#define AVUTIL_INTFLOAT_READWRITE_H + +#include +#include "attributes.h" + +/* IEEE 80 bits extended float */ +typedef struct AVExtFloat { + uint8_t exponent[2]; + uint8_t mantissa[8]; +} AVExtFloat; + +attribute_deprecated double av_int2dbl(int64_t v) av_const; +attribute_deprecated float av_int2flt(int32_t v) av_const; +attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const; +attribute_deprecated int64_t av_dbl2int(double d) av_const; +attribute_deprecated int32_t av_flt2int(float d) av_const; +attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const; + +#endif /* AVUTIL_INTFLOAT_READWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intreadwrite.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intreadwrite.h new file mode 100644 index 0000000..7ee6977 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/intreadwrite.h @@ -0,0 +1,621 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. 
+ */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) +# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + +/* + * Define AV_[RW]N helper 
macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + 
((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + 
((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. + */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. + */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lfg.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lfg.h new file mode 100644 index 0000000..ec90562 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lfg.h @@ -0,0 +1,62 @@ +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. 
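/*
 * Usage sketch (illustrative annotation, not part of the patch hunks): the
 * AV_RBxx/AV_RLxx and AV_WBxx/AV_WLxx macros above read and write fixed-width
 * integers from byte buffers regardless of host endianness or alignment.
 */
#include <stdint.h>
#include <libavutil/intreadwrite.h>

static void example_intreadwrite(void)
{
    uint8_t buf[8] = { 0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0 };

    uint32_t be = AV_RB32(buf);     /* 0x12345678: big-endian read        */
    uint32_t le = AV_RL32(buf);     /* 0x78563412: little-endian read     */

    AV_WB16(buf + 4, 0xBEEF);       /* buf[4] = 0xBE, buf[5] = 0xEF       */
    AV_WL16(buf + 6, 0xBEEF);       /* buf[6] = 0xEF, buf[7] = 0xBE       */

    (void)be; (void)le;
}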
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. + * + * @param out array where the two generated numbers are placed + */ +void av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/log.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/log.h new file mode 100644 index 0000000..55459e8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/log.h @@ -0,0 +1,313 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
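/*
 * Usage sketch (illustrative annotation, not part of the patch hunks):
 * seeding the lagged Fibonacci generator from lfg.h above and drawing a few
 * values. The seed value is arbitrary.
 */
#include <libavutil/lfg.h>

static unsigned int example_lfg(void)
{
    AVLFG prng;
    unsigned int sum = 0;
    int i;

    av_lfg_init(&prng, 0xDEADBEEF);   /* any 32-bit seed works            */
    for (i = 0; i < 4; i++)
        sum += av_lfg_get(&prng);     /* next pseudo-random 32-bit number */
    return sum;
}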
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_NB, ///< not part of ABI/API +}AVClassCategory; + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. + */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. + * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. 
+ * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. + */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param ap The arguments referenced by the format string. 
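/*
 * Usage sketch (illustrative annotation, not part of the patch hunks):
 * raising the log threshold and emitting a message through av_log().
 * Passing NULL as the context is allowed; a real caller would normally pass
 * an AVClass-backed struct so the prefix identifies the source.
 */
#include <libavutil/log.h>

static void example_logging(int frames_dropped)
{
    av_log_set_level(AV_LOG_VERBOSE);   /* admit AV_LOG_VERBOSE and below */
    av_log(NULL, AV_LOG_WARNING,
           "dropped %d frame(s) while decoding\n", frames_dropped);
}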
+ */ +void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* av_default_item_name(void* ctx); +AVClassCategory av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formated line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * av_dlog macros + * Useful to print debug messages that shouldn't get compiled in normally. + */ + +#ifdef DEBUG +# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif + +/** + * Skip repeated messages, this requires the user app to use av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 +void av_log_set_flags(int arg); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lzo.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lzo.h new file mode 100644 index 0000000..c034039 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. 
+ * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. + */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mathematics.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mathematics.h new file mode 100644 index 0000000..71f0392 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mathematics.h @@ -0,0 +1,147 @@ +/* + * copyright (c) 2005-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include +#include +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * @{ + */ + + +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE +}; + +/** + * Return the greatest common divisor of a and b. + * If both a and b are 0 or either or both are <0 then behavior is + * undefined. + */ +int64_t av_const av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * A simple a*b/c isn't possible as it can overflow. + */ +int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. 
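/*
 * Usage sketch (illustrative annotation, not part of the patch hunks): the
 * av_lzo1x_decode() calling convention from lzo.h above. Both length
 * arguments are in/out -- they come back holding the number of bytes left --
 * and both buffers must carry the documented padding. The buffers here are
 * caller-supplied; no compressed data is shown.
 */
#include <libavutil/lzo.h>

static int example_lzo_decode(void *dst, int dst_size,
                              const void *compressed, int compressed_size)
{
    /* dst needs AV_LZO_OUTPUT_PADDING (12) spare bytes and compressed needs
     * AV_LZO_INPUT_PADDING (8) spare bytes past compressed_size. */
    int outlen = dst_size;
    int inlen  = compressed_size;
    int err    = av_lzo1x_decode(dst, &outlen, compressed, &inlen);

    if (err & AV_LZO_OUTPUT_FULL)
        return -1;                        /* output buffer was too small   */
    return err ? -1 : dst_size - outlen;  /* bytes actually produced       */
}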
+ * A simple a*b/c isn't possible as it can overflow. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding) av_const; + +/** + * Compare 2 timestamps each in its own timebases. + * The result of the function is undefined if one of the timestamps + * is outside the int64_t range when represented in the others timebase. + * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position + */ +int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare 2 integers modulo mod. + * That is we compare integers a and b for which only the least + * significant log2(mod) bits are known. + * + * @param mod must be a power of 2 + * @return a negative value if a is smaller than b + * a positive value if a is greater than b + * 0 if a equals b + */ +int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * @param in_ts Input timestamp + * @param in_tb Input timesbase + * @param fs_tb Duration and *last timebase + * @param duration duration till the next call + * @param out_tb Output timesbase + */ +int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/md5.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/md5.h new file mode 100644 index 0000000..79702c8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/md5.h @@ -0,0 +1,81 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size av_md5_size) + */ +void av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. 
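/*
 * Usage sketch (illustrative annotation, not part of the patch hunks):
 * rescaling a timestamp from a 90 kHz MPEG time base to milliseconds with
 * av_rescale_q() from mathematics.h above; equivalent here to
 * av_rescale(pts_90khz, 1000, 90000) without intermediate overflow.
 */
#include <stdint.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

static int64_t example_pts_to_ms(int64_t pts_90khz)
{
    AVRational src = { 1, 90000 };   /* 1/90000 of a second per tick */
    AVRational dst = { 1, 1000 };    /* milliseconds                 */
    return av_rescale_q(pts_90khz, src, dst);
}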
+ *
+ * @param ctx hash function context
+ * @param src input data to update hash with
+ * @param len input data length
+ */
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param ctx hash function context
+ * @param dst buffer where output digest value is stored
+ */
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+
+/**
+ * Hash an array of data.
+ *
+ * @param dst The output buffer to write the digest into
+ * @param src The data to hash
+ * @param len The length of the data, in bytes
+ */
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mem.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mem.h
new file mode 100644
index 0000000..77b7adc
--- /dev/null
+++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/mem.h
@@ -0,0 +1,342 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "error.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+    #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+    #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+    #define DECLARE_ALIGNED(n,t,v) \
+        AV_PRAGMA(DATA_ALIGN(v,n)) \
+        t __attribute__((aligned(n))) v
+    #define DECLARE_ASM_CONST(n,t,v) \
+        AV_PRAGMA(DATA_ALIGN(v,n)) \
+        static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+    #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+    #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+    #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+    #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+    #define DECLARE_ALIGNED(n,t,v) t v
+    #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+    #define av_malloc_attrib __attribute__((__malloc__))
+#else
+    #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+    #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+    #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
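+ *
+ * A short usage sketch (a hypothetical 4 KiB scratch buffer; error handling
+ * abbreviated):
+ * @code
+ * uint8_t *buf = av_malloc(4096);
+ * if (!buf)
+ *     return AVERROR(ENOMEM);
+ * // ... use buf ...
+ * av_free(buf);
+ * @endcode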
+ * @see av_mallocz() + */ +void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of size * nmemb bytes with av_malloc(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_malloc() + */ +av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_malloc(nmemb * size); +} + +/** + * Allocate or reallocate a block of memory. + * If ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param size Size in bytes of the memory block to be allocated or + * reallocated. + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + * @see av_fast_realloc() + */ +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate or reallocate a block of memory. + * This function does the same thing as av_realloc, except: + * - It takes two arguments and checks the result of the multiplication for + * integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic "buf = realloc(buf); if (!buf) return -1;". + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate or reallocate a block of memory. + * If *ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param size Size in bytes for the memory block to be allocated or + * reallocated + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_reallocp(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +int av_reallocp(void *ptr, size_t size); + +/** + * Allocate or reallocate an array. + * If ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). 
The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate or reallocate an array through a pointer to a pointer. + * If *ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc(). + * @param ptr Pointer to the memory block which should be freed. + * @note ptr = NULL is explicitly allowed. + * @note It is recommended that you use av_freep() instead. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if it cannot be allocated. + * @see av_malloc() + */ +void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of nmemb * size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * The allocation will fail if nmemb * size is greater than or equal + * to INT_MAX. + * @param nmemb + * @param size + * @return Pointer to the allocated block, NULL if it cannot be allocated. + */ +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate a block of size * nmemb bytes with av_mallocz(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_mallocz() + * @see av_malloc_array() + */ +av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_mallocz(nmemb * size); +} + +/** + * Duplicate the string s. + * @param s string to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of s or NULL if the string cannot be allocated. + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate the buffer p. + * @param p buffer to be duplicated + * @return Pointer to a newly allocated buffer containing a + * copy of p or NULL if the buffer cannot be allocated. 
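+ *
+ * A small sketch (the config struct below is a hypothetical payload type):
+ * @code
+ * struct config { int width, height; };
+ *
+ * int clone_config(const struct config *src, struct config **out)
+ * {
+ *     *out = av_memdup(src, sizeof(**out));
+ *     return *out ? 0 : AVERROR(ENOMEM);
+ * }
+ * @endcode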
+ */ +void *av_memdup(const void *p, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc() and set the pointer pointing to it to NULL. + * @param ptr Pointer to the pointer to the memory block which should + * be freed. + * @see av_free() + */ +void av_freep(void *ptr); + +/** + * Add an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem element to add + * @see av_dynarray2_add() + */ +void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size elem_size to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem_size size in bytes of the elements in the array + * @param elem_data pointer to the data of the element to add. If NULL, the space of + * the new added element is not filled. + * @return pointer to the data of the element to copy in the new allocated space. + * If NULL, the new allocated space is left uninitialized." + * @see av_dynarray_add() + */ +void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * Multiply two size_t values checking for overflow. + * @return 0 if success, AVERROR(EINVAL) if overflow. + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: only try the division if nelem and elsize + * are both greater than sqrt(SIZE_MAX). */ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may me allocated in one block. + */ +void av_max_alloc(size_t max); + +/** + * deliberately overlapping memcpy implementation + * @param dst destination buffer + * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 + * @param cnt number of bytes to copy, must be >= 0 + * + * cnt > back is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of back. 
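+ *
+ * For example, seeding a two-byte pattern and repeating it over a small
+ * buffer (a sketch; the pattern bytes are arbitrary):
+ * @code
+ * uint8_t buf[10] = { 0xAB, 0xCD };
+ * av_memcpy_backptr(buf + 2, 2, 8); // buf is now AB CD AB CD AB CD AB CD AB CD
+ * @endcode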
+ */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/murmur3.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/murmur3.h new file mode 100644 index 0000000..f29ed97 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/murmur3.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include + +struct AVMurMur3 *av_murmur3_alloc(void); +void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); +void av_murmur3_init(struct AVMurMur3 *c); +void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/old_pix_fmts.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/old_pix_fmts.h new file mode 100644 index 0000000..3ee8aec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/old_pix_fmts.h @@ -0,0 +1,175 @@ +/* + * copyright (c) 2006-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OLD_PIX_FMTS_H +#define AVUTIL_OLD_PIX_FMTS_H + +/* + * This header exists to prevent new pixel formats from being accidentally added + * to the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVPixelFormat enum instead. + */ + PIX_FMT_NONE = AV_PIX_FMT_NONE, + PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + PIX_FMT_GRAY8, ///< Y , 8bpp + PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + PIX_FMT_XVMC_MPEG2_IDCT, + PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
+ + PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), 
little-endian + PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus + //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately + //is better + PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian + PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian + PIX_FMT_GBRP10BE, ///< planar GBR 
4:4:4 30bpp, big endian + PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian + PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian + PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian + PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian + PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian + PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian + + PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +#endif /* AVUTIL_OLD_PIX_FMTS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/opt.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/opt.h new file mode 100644 index 0000000..14faa6e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/opt.h @@ -0,0 +1,757 @@ +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. + * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, av_opt_set_defaults() can be called to + * initialize defaults. After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the av_opt_free() function to automatically + * free all the allocated string and binary options. 
+ * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = av_malloc(sizeof(*ret)); + * ret->class = &test_class; + * av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * av_opt_free(*foo); + * av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. 
AVCodecContext in libavcodec or + * AVFormatContext in libavformat. + * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are av_opt_next(), which iterates + * over all options defined for one object, and av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call av_opt_child_next() recursively (and + * av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to av_opt_set() is enough. For + * non-string type options, av_opt_set() will parse the string according to the + * option type. + * + * Similarly av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. This allows to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. + */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_CONST = 128, + AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), + AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), + AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational + AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), + AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), +#if FF_API_OLD_AVOPTIONS + FF_OPT_TYPE_FLAGS = 0, + FF_OPT_TYPE_INT, + FF_OPT_TYPE_INT64, + FF_OPT_TYPE_DOUBLE, + FF_OPT_TYPE_FLOAT, + FF_OPT_TYPE_STRING, + FF_OPT_TYPE_RATIONAL, + FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + FF_OPT_TYPE_CONST=128, +#endif +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. 
+ */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + double value_min, value_max; ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count + double component_min, component_max; ///< For string this represents the unicode range for chars, 0-127 limits to ASCII + int is_range; ///< if set to 1 the struct encodes a range, if set to 0 a single value +} AVOptionRange; + +/** + * List of AVOptionRange structs + */ +typedef struct AVOptionRanges { + AVOptionRange **range; + int nb_ranges; +} AVOptionRanges; + + +#if FF_API_FIND_OPT +/** + * Look for an option in obj. Look only for the options which + * have the flags set as specified in mask and flags (that is, + * for which it is the case that (opt->flags & mask) == flags). + * + * @param[in] obj a pointer to a struct whose first element is a + * pointer to an AVClass + * @param[in] name the name of the option to look for + * @param[in] unit the unit of the option to look for, or any if NULL + * @return a pointer to the option found, or NULL if no option + * has been found + * + * @deprecated use av_opt_find. + */ +attribute_deprecated +const AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags); +#endif + +#if FF_API_OLD_AVOPTIONS +/** + * Set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an + * AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. If the field is not of a string + * type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. 
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption + * found + * @param alloc this parameter is currently ignored + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + * @deprecated use av_opt_set() + */ +attribute_deprecated +int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out); + +attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n); +attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n); +attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n); + +double av_get_double(void *obj, const char *name, const AVOption **o_out); +AVRational av_get_q(void *obj, const char *name, const AVOption **o_out); +int64_t av_get_int(void *obj, const char *name, const AVOption **o_out); +attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len); +attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last); +#endif + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void av_opt_set_defaults(void *s); + +#if FF_API_OLD_AVOPTIONS +attribute_deprecated +void av_opt_set_defaults2(void *s, int mask, int flags); +#endif + +/** + * Parse the key/value pairs list in opts. For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + */ +int av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. 
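+ *
+ * For example (a sketch; obj is any AVOptions-enabled struct and the option
+ * names used here are hypothetical):
+ * @code
+ * static const char *const shorthand[] = { "width", "height", NULL };
+ * int ret = av_opt_set_from_string(obj, "640:480:pix_fmt=yuv420p",
+ *                                  shorthand, "=", ":");
+ * if (ret < 0)
+ *     return ret;
+ * @endcode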
+ * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all string and binary options in obj. + */ +void av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see av_dict_copy() + */ +int av_opt_set_dict(void *obj, struct AVDictionary **options); + +/** + * Extract a key-value pair from the beginning of a string. + * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using av_free() + * @param rval parsed value; must be freed using av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. 
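+ *
+ * With this flag set, av_opt_get_key_value() can walk a list such as
+ * "foo,bar=1", returning a NULL key for the first entry (a sketch; the
+ * input string is arbitrary and error handling is minimal):
+ * @code
+ * const char *p = "foo,bar=1";
+ * char *key, *val;
+ * while (*p) {
+ *     if (av_opt_get_key_value(&p, "=", ",", AV_OPT_FLAG_IMPLICIT_KEY,
+ *                              &key, &val) < 0)
+ *         break;
+ *     // key == NULL for "foo"; key == "bar", val == "1" for the second pair
+ *     av_free(key);
+ *     av_free(val);
+ *     if (*p)
+ *         p++; // skip the pairs separator
+ * }
+ * @endcode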
+ */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. + */ +int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the + given object first. */ +/** + * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ 0x0002 + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with av_set_string3(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. 
+ * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. + * @param prev result of the previous call to av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *av_opt_next(void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. + * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. + * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int av_opt_set_double(void *obj, const char *name, double val, int search_flags); +int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); + +/** + * Set a binary option to an integer list. 
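+ *
+ * For example, constraining a hypothetical sink object to a fixed list of
+ * pixel formats (a sketch; "sink" is assumed to expose a binary "pix_fmts"
+ * option, as some libavfilter sinks do):
+ * @code
+ * static const enum AVPixelFormat pix_fmts[] = {
+ *     AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ * };
+ * int ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts,
+ *                               AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ * @endcode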
+ * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \ + AVERROR(EINVAL) : \ + av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller + */ +int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val); +int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_freep_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. 
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_free_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/parseutils.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/parseutils.h new file mode 100644 index 0000000..c80f0de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/parseutils.h @@ -0,0 +1,187 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. + * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. 
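A small sketch of the two parsers declared above; the literal strings and the 255 bound are illustrative:

    #include <libavutil/parseutils.h>
    #include <libavutil/log.h>   /* AV_LOG_MAX_OFFSET, used by av_parse_ratio_quiet() */

    static int parse_geometry(void)
    {
        int w = 0, h = 0;
        AVRational dar;
        int ret;

        /* "WxH" or a known video size abbreviation both work here. */
        ret = av_parse_video_size(&w, &h, "1280x720");
        if (ret < 0)
            return ret;

        /* "num:den", a float or an expression; 255 caps numerator and denominator. */
        return av_parse_ratio_quiet(&dar, "16:9", 255);
    }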
+ * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. + * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. + + * @param timestr a string representing a date or a duration. + * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). 
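A short sketch of the date/duration and color parsing documented above; the example strings are arbitrary:

    #include <stdint.h>
    #include <libavutil/parseutils.h>

    static int parse_times_and_color(void)
    {
        int64_t when = 0, span = 0;
        uint8_t rgba[4];
        int ret;

        /* Date: local time unless 'Z' is appended, then UTC. */
        ret = av_parse_time(&when, "2014-01-01T12:00:00Z", 0);
        if (ret < 0)
            return ret;

        /* Duration: last argument non-zero, result in microseconds. */
        ret = av_parse_time(&span, "00:01:30.5", 1);
        if (ret < 0)
            return ret;

        /* Color name with an alpha suffix; -1 means the whole string is the color. */
        return av_parse_color(rgba, "red@0.5", -1, NULL);
    }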
+ * + * In particular it actually supports the parameters: + * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this + * function call, or NULL in case the function fails to match all of + * the fmt string and therefore an error occurred + */ +char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixdesc.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixdesc.h new file mode 100644 index 0000000..e88bf9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixdesc.h @@ -0,0 +1,291 @@ +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include + +#include "attributes.h" +#include "pixfmt.h" + +typedef struct AVComponentDescriptor{ + uint16_t plane :2; ///< which of the 4 planes contains the component + + /** + * Number of elements between 2 horizontally consecutive pixels minus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t step_minus1 :3; + + /** + * Number of elements before the component of the first pixel plus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t offset_plus1 :3; + uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value + uint16_t depth_minus1 :4; ///< number of bits in the component minus 1 +}AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor{ + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = -((-luma_width) >> log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w) + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= -((-luma_height) >> log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + uint8_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 2 or 4 components, then alpha is last. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components, + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + */ + AVComponentDescriptor comp[4]; +}AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. + */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) +/** + * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as + * paletted internally, but the palette is generated by the decoder and is not + * stored in the file. + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) +/** + * The pixel format has an alpha channel. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +#if FF_API_PIX_FMT +/** + * @deprecated use the AV_PIX_FMT_FLAG_* flags + */ +#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE +#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL +#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM +#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL +#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR +#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB +#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL +#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA +#endif + +#if FF_API_PIX_FMT_DESC +/** + * The array of all the pixel format descriptors. + */ +extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[]; +#endif + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. 
+ * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. + */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt); + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. 
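A short sketch combining the lookup and descriptor helpers of this header (av_pix_fmt_desc_get() and av_pix_fmt_get_chroma_sub_sample() are declared just below); the "yuv420p" lookup name is only an example and error handling is reduced to early returns:

    #include <libavutil/error.h>     /* AVERROR() */
    #include <libavutil/pixdesc.h>

    static int inspect_format(const char *name)
    {
        enum AVPixelFormat fmt = av_get_pix_fmt(name);   /* e.g. "yuv420p" */
        const AVPixFmtDescriptor *desc;
        int h_shift = 0, v_shift = 0;

        if (fmt == AV_PIX_FMT_NONE)
            return AVERROR(EINVAL);

        desc = av_pix_fmt_desc_get(fmt);
        if (!desc)
            return AVERROR(EINVAL);

        /* Skip chroma subsampling queries for RGB/hwaccel formats in this sketch. */
        if (!(desc->flags & (AV_PIX_FMT_FLAG_RGB | AV_PIX_FMT_FLAG_HWACCEL)))
            av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift);

        return av_get_bits_per_pixel(desc);   /* padding bits excluded */
    }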
+ */ +const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * See avcodec_get_chroma_sub_sample() for a function that asserts a + * valid pixel format instead of returning an error code. + * Its recommanded that you use avcodec_get_chroma_sub_sample unless + * you do check the return code! + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +void ff_check_pixfmt_descriptors(void); + +/** + * Utility function to swap the endianness of a pixel format. + * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixfmt.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixfmt.h new file mode 100644 index 0000000..6f55828 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/pixfmt.h @@ -0,0 +1,397 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + * + */ + +#include "avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @par + * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. 
The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + * + * @note + * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1 + * and that all newly added little-endian formats have (pix_fmt & 1) == 0. + * This allows simpler detection of big vs little-endian. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... + AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + AV_PIX_FMT_XVMC_MPEG2_IDCT, + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... 
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), 
little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is 
stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + + /** + * duplicated pixel formats for compatibility with libav. + * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55) + * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85) + */ + AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr 
& Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, 
GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions + +#if FF_API_PIX_FMT +#include "old_pix_fmts.h" +#endif +}; + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV +#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV +#endif + + +#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A +#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGRA64 
AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) + +#if FF_API_PIX_FMT +#define PixelFormat AVPixelFormat + +#define PIX_FMT_Y400A AV_PIX_FMT_Y400A +#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P + +#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le) + +#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 +#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1 +#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32 +#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1 +#define PIX_FMT_0RGB32 AV_PIX_FMT_0RGB32 +#define PIX_FMT_0BGR32 AV_PIX_FMT_0BGR32 + +#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16 +#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48 +#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565 +#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555 +#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444 +#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48 +#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565 +#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555 +#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444 + +#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9 +#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9 +#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9 +#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10 +#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10 +#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10 +#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12 +#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12 +#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12 +#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14 +#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14 +#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14 +#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16 +#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16 +#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16 + +#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64 +#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64 +#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9 +#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10 +#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12 +#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14 +#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16 +#endif + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/random_seed.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/random_seed.h new file mode 100644 index 0000000..0462a04 --- 
/dev/null
+++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/random_seed.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get a seed to use in conjunction with random functions.
+ * This function tries to provide a good seed at a best effort bases.
+ * Its possible to call this function multiple times if more bits are needed.
+ * It can be quite slow, which is why it should only be used as seed for a faster
+ * PRNG. The quality of the seed depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/rational.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/rational.h
new file mode 100644
index 0000000..b9800ee
--- /dev/null
+++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/rational.h
@@ -0,0 +1,155 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+    int num; ///< numerator
+    int den; ///< denominator
+} AVRational;
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+    const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+    if(tmp) return ((tmp ^ a.den ^ b.den)>>63)|1;
+    else if(b.den && a.den) return 0;
+    else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+    else                    return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+    return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
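A small sketch of working with AVRational values via the inline helpers above; the 29.97 fps and 25 fps figures are arbitrary examples:

    #include <libavutil/rational.h>

    static double pick_larger_rate(void)
    {
        AVRational ntsc_fps = { 30000, 1001 };   /* 29.97 fps as an exact fraction */
        AVRational pal_fps  = { 25, 1 };

        /* av_cmp_q(): 1, 0 or -1 like a comparator, INT_MIN for 0/0 operands;
         * convert to double with av_q2d() only at the edges of a computation. */
        if (av_cmp_q(ntsc_fps, pal_fps) > 0)
            return av_q2d(ntsc_fps);
        return av_q2d(pal_fps);
    }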
+ * @param dst_num destination numerator + * @param dst_den destination denominator + * @param num source numerator + * @param den source denominator + * @param max the maximum allowed for dst_num & dst_den + * @return 1 if exact, 0 otherwise + */ +int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b first rational + * @param c second rational + * @return b*c + */ +AVRational av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b first rational + * @param c second rational + * @return b/c + */ +AVRational av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b first rational + * @param c second rational + * @return b+c + */ +AVRational av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b first rational + * @param c second rational + * @return b-c + */ +AVRational av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. + * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * inf is expressed as {1,0} or {-1,0} depending on the sign. + * + * @param d double to convert + * @param max the maximum allowed numerator and denominator + * @return (AVRational) d + */ +AVRational av_d2q(double d, int max) av_const; + +/** + * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer + * than q1, 0 if they have the same distance. + */ +int av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the nearest value in q_list to q. + * @param q_list an array of rationals terminated by {0, 0} + * @return the index of the nearest value found in the array + */ +int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/ripemd.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/ripemd.h new file mode 100644 index 0000000..7b0c8bc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/ripemd.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. 
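A minimal hashing sketch using this allocator together with the update/final calls declared just below; freeing the context with plain av_free() is an assumption based on av_ripemd_alloc() returning an av_malloc()ed block:

    #include <stdint.h>
    #include <libavutil/ripemd.h>
    #include <libavutil/mem.h>   /* av_free() */

    static int ripemd160_digest(const uint8_t *buf, unsigned int len, uint8_t out[20])
    {
        struct AVRIPEMD *ctx = av_ripemd_alloc();
        int ret = -1;

        if (!ctx)
            return ret;
        if (av_ripemd_init(ctx, 160) == 0) {      /* 128, 160, 256 or 320 bits */
            av_ripemd_update(ctx, buf, len);
            av_ripemd_final(ctx, out);            /* 160 bits -> 20 bytes */
            ret = 0;
        }
        av_free(ctx);                             /* context came from av_ripemd_alloc() */
        return ret;
    }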
+ * + * @param context pointer to the function context (of size av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/samplefmt.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/samplefmt.h new file mode 100644 index 0000000..db17d43 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/samplefmt.h @@ -0,0 +1,256 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include + +#include "avutil.h" +#include "attributes.h" + +/** + * Audio Sample Formats + * + * @par + * The data described by the sample format is always in native-endian order. + * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * @par + * The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * @par + * The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. 
DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +#if FF_API_GET_BITS_PER_SAMPLE_FMT +/** + * @deprecated Use av_get_bytes_per_sample() instead. + */ +attribute_deprecated +int av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt); +#endif + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. + * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. 
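A small sketch of the sample-format query helpers above; the format name is illustrative:

    #include <libavutil/samplefmt.h>

    static int packed_bytes_per_sample(const char *name)
    {
        enum AVSampleFormat fmt = av_get_sample_fmt(name);   /* e.g. "fltp" */

        if (fmt == AV_SAMPLE_FMT_NONE)
            return -1;

        /* Planar formats keep one data plane per channel; map to the packed
         * twin before querying the per-sample size (same size either way). */
        if (av_sample_fmt_is_planar(fmt))
            fmt = av_get_packed_sample_fmt(fmt);

        return av_get_bytes_per_sample(fmt);
    }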
+ * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see av_samples_fill_arrays() + * @see av_samples_alloc_array_and_samples() + */ +int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. + * + * This is the same as av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see av_samples_alloc() + */ +int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. 
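+ *
+ * Illustrative sketch (not part of the upstream header): duplicate nb_samples
+ * planar-float frames from an existing src_data plane array into a freshly
+ * allocated destination; src_data, nb_channels and nb_samples are
+ * caller-provided, and nb_channels is assumed to be at most 8 here.
+ * @code
+ * uint8_t *dst_data[8];
+ * int dst_linesize;
+ * av_samples_alloc(dst_data, &dst_linesize, nb_channels, nb_samples,
+ *                  AV_SAMPLE_FMT_FLTP, 0);
+ * av_samples_copy(dst_data, src_data, 0, 0, nb_samples, nb_channels,
+ *                 AV_SAMPLE_FMT_FLTP);
+ * @endcode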
+ * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha.h new file mode 100644 index 0000000..bf4377e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. 
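+ *
+ * Illustrative end-to-end sketch (not part of the upstream header): hashing a
+ * caller-provided buffer buf of len bytes with SHA-256; error checks are
+ * omitted and av_free() comes from libavutil/mem.h.
+ * @code
+ * struct AVSHA *sha = av_sha_alloc();
+ * uint8_t digest[32];                      // 256 bits
+ * av_sha_init(sha, 256);
+ * av_sha_update(sha, buf, len);
+ * av_sha_final(sha, digest);
+ * av_free(sha);
+ * @endcode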
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha512.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha512.h new file mode 100644 index 0000000..7b08701 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/sha512.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA512 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/time.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/time.h new file mode 100644 index 0000000..90eb436 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/time.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include + +/** + * Get the current time in microseconds. + */ +int64_t av_gettime(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timecode.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timecode.h new file mode 100644 index 0000000..56e3975 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timecode.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 16 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. 
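+ *
+ * Illustrative sketch (not part of the upstream header): build a 29.97 fps
+ * drop-frame timecode starting at frame 0 and query frame 1800.
+ * @code
+ * AVTimecode tc;
+ * char buf[AV_TIMECODE_STR_SIZE];
+ * av_timecode_init(&tc, (AVRational){30000, 1001},
+ *                  AV_TIMECODE_FLAG_DROPFRAME, 0, NULL);
+ * uint32_t smpte = av_timecode_get_smpte_from_framenum(&tc, 1800);
+ * av_timecode_make_string(&tc, buf, 1800);  // e.g. "00:01:00;02"
+ * @endcode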
+ */ +uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). + * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for av_log). + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timestamp.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timestamp.h new file mode 100644 index 0000000..f63a08c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/timestamp.h @@ -0,0 +1,74 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/version.h new file mode 100644 index 0000000..dd70beb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/version.h @@ -0,0 +1,153 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +/** + * @defgroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +#define AV_PRAGMA(s) _Pragma(#s) + +/** + * @} + */ + +/** + * @defgroup version_utils Library Version Macros + * + * Useful to check and match library version in order to maintain + * backward compatibility. 
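+ *
+ * For instance (an illustrative use, not from the upstream docs), a
+ * compile-time guard against older libavutil releases can be written with the
+ * macros defined below:
+ * @code
+ * #if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 48, 0)
+ * #error "libavutil 52.48 or newer is required"
+ * #endif
+ * @endcode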
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * @} + */ + + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 52 +#define LIBAVUTIL_VERSION_MINOR 48 +#define LIBAVUTIL_VERSION_MICRO 101 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @} + * + * @defgroup depr_guards Deprecation guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @{ + */ + +#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT +#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_FIND_OPT +#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_OLD_AVOPTIONS +#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT +#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CONTEXT_SIZE +#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT_DESC +#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AV_REVERSE +#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AUDIOCONVERT +#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CPU_FLAG_MMX2 +#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_SAMPLES_UTILS_RETURN_ZERO +#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS_PRIVATE +#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS1 +#define FF_API_LLS1 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AVFRAME_LAVC +#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT +#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif + +/** + * @} + */ + +#endif /* AVUTIL_VERSION_H */ + diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/xtea.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/xtea.h new file mode 100644 index 0000000..0899c92 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libavutil/xtea.h @@ -0,0 +1,62 @@ +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include + +/** + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption + */ +void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/swresample.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/swresample.h new file mode 100644 index 0000000..3811301 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/swresample.h @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr Libswresample + * @{ + * + * Libswresample (lswr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. 
+ * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * SwrContext *swr = swr_alloc(); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once all values have been set, it must be initialized with swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * as described above, or by using swr_alloc_set_opts(), then call swr_init() + * again. + * + * The conversion itself is done by repeatedly calling swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * swr_convert() with NULL in and 0 in_count. + * + * The delay between input and output, can at any time be found by using + * swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with swr_free(). + * There will be no memory leak if the data is not completely flushed before + * swr_free(). + */ + +#include +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +#if LIBSWRESAMPLE_VERSION_MAJOR < 1 +#define SWR_CH_MAX 32 ///< Maximum number of channels +#endif + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
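+
+/*
+ * Illustrative sketch (not part of the upstream header): draining the samples
+ * swr may still be buffering once the input is exhausted, reusing the
+ * caller-defined output/handle_output() names from the example above, and
+ * then releasing the context. max_out_samples is assumed to be the capacity,
+ * in samples per channel, of the buffer behind output.
+ *
+ *     int flushed;
+ *     do {
+ *         flushed = swr_convert(swr, &output, max_out_samples, NULL, 0);
+ *         if (flushed > 0)
+ *             handle_output(output, flushed);
+ *     } while (flushed > 0);
+ *     swr_free(&swr);
+ */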
+ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for swrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *swr_get_class(void); + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with swr_alloc_set_opts()) before calling swr_init(). + * + * @see swr_alloc_set_opts(), swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * + * @return AVERROR error code in case of failure. + */ +int swr_init(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s Swr context, can be NULL + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int log_offset, void *log_ctx); + +/** + * Free the given SwrContext and set the pointer to NULL. + */ +void swr_free(struct SwrContext **s); + +/** + * Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space then the input will be buffered. + * You can avoid this buffering by providing more output space than input. + * Convertion will run directly without copying whenever possible. 
+ * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. + * First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers + * + * @param pts timestamp for the next input sample, INT64_MIN if unknown + * @return the output timestamp for the next output sample + */ +int64_t swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * Activate resampling compensation. + */ +int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. + * + * @param s allocated Swr context, not yet initialized + * @param channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return AVERROR error code in case of failure. + */ +int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Set a customized remix matrix. + * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return AVERROR error code in case of failure. + */ +int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * Drops the specified number of output samples. + */ +int swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + */ +int swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. + * + * @param s swr context + * @param base timebase in which the returned delay will be + * if its set to 1 the returned delay is in seconds + * if its set to 1000 the returned delay is in milli seconds + * if its set to the input sample rate then the returned delay is in input samples + * if its set to the output sample rate then the returned delay is in output samples + * an exact rounding free delay can be found by using LCM(in_sample_rate, out_sample_rate) + * @returns the delay in 1/base units. 
+ */ +int64_t swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Return the LIBSWRESAMPLE_VERSION_INT constant. + */ +unsigned swresample_version(void); + +/** + * Return the swr build-time configuration. + */ +const char *swresample_configuration(void); + +/** + * Return the swr license. + */ +const char *swresample_license(void); + +/** + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/version.h new file mode 100644 index 0000000..464c86d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWR_VERSION_H +#define SWR_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 0 +#define LIBSWRESAMPLE_VERSION_MINOR 17 +#define LIBSWRESAMPLE_VERSION_MICRO 104 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWR_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/swscale.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/swscale.h new file mode 100644 index 0000000..66f63fc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/swscale.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2001-2011 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup lsws + * external API header + */ + +/** + * @defgroup lsws Libswscale + * @{ + */ + +#include + +#include "avutil.h" +#include "log.h" +#include "pixfmt.h" +#include "version.h" + +/** + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *swscale_configuration(void); + +/** + * Return the libswscale license. + */ +const char *swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominace subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#if FF_API_SWS_CPU_CAPS +/** + * CPU caps are autodetected now, those flags + * are only provided for API compatibility. + */ +#define SWS_CPU_CAPS_MMX 0x80000000 +#define SWS_CPU_CAPS_MMXEXT 0x20000000 +#define SWS_CPU_CAPS_MMX2 0x20000000 +#define SWS_CPU_CAPS_3DNOW 0x40000000 +#define SWS_CPU_CAPS_ALTIVEC 0x10000000 +#define SWS_CPU_CAPS_BFIN 0x01000000 +#define SWS_CPU_CAPS_SSE2 0x02000000 +#endif + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable for sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. + */ +int sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. This must be filled and passed to + * sws_init_context(). 
For filling see AVOptions, options.c and + * sws_setColorspaceDetails(). + */ +struct SwsContext *sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. + */ +void sws_freeContext(struct SwsContext *swsContext); + +#if FF_API_SWS_GETCONTEXT +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using sws_scale(). + * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + * @deprecated Use sws_getCachedContext() instead. + */ +struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); +#endif + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
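+ *
+ * Illustrative sketch (not part of the upstream header): convert one whole
+ * decoded frame from YUV420P to RGB24 in a single slice; w, h and the
+ * srcData/srcLinesize and dstData/dstLinesize plane arrays are caller-provided.
+ * @code
+ * struct SwsContext *c = sws_getCachedContext(NULL, w, h, AV_PIX_FMT_YUV420P,
+ *                                             w, h, AV_PIX_FMT_RGB24,
+ *                                             SWS_BILINEAR, NULL, NULL, NULL);
+ * sws_scale(c, srcData, srcLinesize, 0, h, dstData, dstLinesize);
+ * sws_freeContext(c);
+ * @endcode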
+ * + * @param c the scaling context previously created with + * sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + +/** + * Allocate and return an uninitialized vector with length coefficients. + */ +SwsVector *sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *sws_getGaussianVec(double variance, double quality); + +/** + * Allocate and return a vector with length coefficients, all + * with the same value c. + */ +SwsVector *sws_getConstVec(double c, int length); + +/** + * Allocate and return a vector with just one coefficient, with + * value 1.0. + */ +SwsVector *sws_getIdentityVec(void); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void sws_normalizeVec(SwsVector *a, double height); +void sws_convVec(SwsVector *a, SwsVector *b); +void sws_addVec(SwsVector *a, SwsVector *b); +void sws_subVec(SwsVector *a, SwsVector *b); +void sws_shiftVec(SwsVector *a, int shift); + +/** + * Allocate and return a clone of the vector a, that is a vector + * with the same coefficients as a. + */ +SwsVector *sws_cloneVec(SwsVector *a); + +/** + * Print with av_log() a textual representation of the vector a + * if log_level <= av_log_level. 
+ */ +void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); + +void sws_freeVec(SwsVector *a); + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. + */ +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/version.h b/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/version.h new file mode 100644 index 0000000..c5ff0b8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/include/libswscale/version.h @@ -0,0 +1,59 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "avutil.h" + +#define LIBSWSCALE_VERSION_MAJOR 2 +#define LIBSWSCALE_VERSION_MINOR 5 +#define LIBSWSCALE_VERSION_MICRO 101 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_SWS_GETCONTEXT +#define FF_API_SWS_GETCONTEXT (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_CPU_CAPS +#define FF_API_SWS_CPU_CAPS (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_FORMAT_NAME +#define FF_API_SWS_FORMAT_NAME (LIBSWSCALE_VERSION_MAJOR < 3) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavcodec.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavcodec.a new file mode 100644 index 0000000..6cdec48 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavcodec.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavdevice.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavdevice.a new file mode 100644 index 0000000..edcde96 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavdevice.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavfilter.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavfilter.a new file mode 100644 index 0000000..91f9acd Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavfilter.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavformat.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavformat.a new file mode 100644 index 0000000..c877ff6 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavformat.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavresample.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavresample.a new file mode 100644 index 0000000..09ba1b3 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libavutil.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavutil.a new file mode 100644 index 0000000..1444d4e Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libavutil.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libswresample.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libswresample.a new file mode 100644 index 0000000..f53b66f Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libswresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/libswscale.a b/Limelight-iOS/libs/ffmpeg/armv7/lib/libswscale.a new file mode 100644 index 0000000..549d39d Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7/lib/libswscale.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavcodec.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavcodec.pc new file mode 100644 index 0000000..9bef50e --- 
/dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavcodec.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavcodec +Description: FFmpeg codec library +Version: 55.39.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavcodec -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavdevice.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavdevice.pc new file mode 100644 index 0000000..151f5f0 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavdevice.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavdevice +Description: FFmpeg device handling library +Version: 55.5.100 +Requires: libavfilter = 3.90.100, libavformat = 55.19.104 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavdevice -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavfilter.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavfilter.pc new file mode 100644 index 0000000..4e4c8ae --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavfilter.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavfilter +Description: FFmpeg audio/video filtering library +Version: 3.90.100 +Requires: libswresample = 0.17.104, libswscale = 2.5.101, libavresample = 1.1.0, libavformat = 55.19.104, libavcodec = 55.39.101, libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavfilter -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavformat.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavformat.pc new file mode 100644 index 0000000..5e1a0c8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavformat.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavformat +Description: FFmpeg container format library +Version: 55.19.104 +Requires: libavcodec = 55.39.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavformat -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavresample.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavresample.pc new file mode 100644 index 0000000..b6b9b9d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavresample +Description: Libav audio resampling library +Version: 1.1.0 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavresample -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavutil.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavutil.pc new file mode 100644 index 0000000..d1dca8f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libavutil.pc @@ -0,0 +1,14 @@ 
+prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavutil +Description: FFmpeg utility library +Version: 52.48.101 +Requires: +Requires.private: +Conflicts: +Libs: -L${libdir} -lavutil -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswresample.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswresample.pc new file mode 100644 index 0000000..707cb2f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswresample +Description: FFmpeg audio resampling library +Version: 0.17.104 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswresample -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswscale.pc b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswscale.pc new file mode 100644 index 0000000..4d2b676 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7/lib/pkgconfig/libswscale.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswscale +Description: FFmpeg image rescaling library +Version: 2.5.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswscale -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avcodec.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avcodec.h new file mode 100644 index 0000000..fe64b38 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avcodec.h @@ -0,0 +1,5029 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include <errno.h> +#include "libavutil/samplefmt.h" +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/cpu.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc Encoding/Decoding Library + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + * + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of a existing codec ID changes (that would break ABI), + * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec. + * This ensures that 2 forks can independently add AVCodecIDs without producing conflicts. + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version.
+ */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + AV_CODEC_ID_MPEG2VIDEO_XVMC, + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, + AV_CODEC_ID_IFF_BYTERUN1, + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + 
AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130_DEPRECATED, + AV_CODEC_ID_G2M_DEPRECATED, + AV_CODEC_ID_WEBP_DEPRECATED, + + AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'), + AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + AV_CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + AV_CODEC_ID_EXR = MKBETAG('0','E','X','R'), + AV_CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + AV_CODEC_ID_012V = MKBETAG('0','1','2','V'), + AV_CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + AV_CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + AV_CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'), + AV_CODEC_ID_V308 = MKBETAG('V','3','0','8'), + AV_CODEC_ID_V408 = MKBETAG('V','4','0','8'), + AV_CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + AV_CODEC_ID_SANM = MKBETAG('S','A','N','M'), + AV_CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + AV_CODEC_ID_AVRN = MKBETAG('A','V','R','n'), + AV_CODEC_ID_CPIA = MKBETAG('C','P','I','A'), + AV_CODEC_ID_XFACE = MKBETAG('X','F','A','C'), + AV_CODEC_ID_SGIRLE = MKBETAG('S','G','I','R'), + AV_CODEC_ID_MVC1 = MKBETAG('M','V','C','1'), + AV_CODEC_ID_MVC2 = MKBETAG('M','V','C','2'), + AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'), + AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'), + AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'), + AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'), +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, + AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'), + AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'), + AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16), + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + 
AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '), + AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '), + AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '), + AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '), + AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'), + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE + AV_CODEC_ID_VOXWARE, +#endif + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS_DEPRECATED, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK_DEPRECATED, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + AV_CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + AV_CODEC_ID_OPUS = MKBETAG('O','P','U','S'), + AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'), + AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'), + AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'), + + /* subtitle codecs */ + AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. 
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + AV_CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + AV_CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + AV_CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + AV_CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + AV_CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'), + AV_CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + AV_CODEC_ID_SUBRIP = MKBETAG('S','R','i','p'), + AV_CODEC_ID_WEBVTT = MKBETAG('W','V','T','T'), + AV_CODEC_ID_MPL2 = MKBETAG('M','P','L','2'), + AV_CODEC_ID_VPLAYER = MKBETAG('V','P','l','r'), + AV_CODEC_ID_PJS = MKBETAG('P','h','J','S'), + AV_CODEC_ID_ASS = MKBETAG('A','S','S',' '), ///< ASS as defined in Matroska + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + AV_CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + AV_CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + AV_CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'), + AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'), + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#if FF_API_CODEC_ID +#include "old_codec_ids.h" +#endif +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. + * @see avcodec_get_descriptor() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. + * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. 
+ * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bit at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 16 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define FF_MIN_BUFFER_SIZE 16384 + + +/** + * @ingroup lavc_encoding + * motion estimation type. + */ +enum Motion_Est_ID { + ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed + ME_FULL, + ME_LOG, + ME_PHODS, + ME_EPZS, ///< enhanced predictive zonal search + ME_X1, ///< reserved for experiments + ME_HEX, ///< hexagon based search + ME_UMH, ///< uneven multi-hexagon search + ME_TESA, ///< transformed exhaustive search algorithm + ME_ITER=50, ///< iterative search +}; + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). */ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVColorPrimaries{ + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_BT470M = 4, + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, + AVCOL_PRI_NB , ///< Not part of ABI +}; + +enum AVColorTransferCharacteristic{ + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_NB , ///< Not part of ABI +}; + +/** + * X X 3 4 X X are luma samples, + * 1 2 1-6 are possible chroma positions + * X X 5 6 X 0 is undefined/unknown position + */ +enum AVChromaLocation{ + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default + AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263 + AVCHROMA_LOC_TOPLEFT = 3, ///< DV + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB , ///< Not part of ABI +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +#define FF_MAX_B_FRAMES 16 + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). 
+ */ +#define CODEC_FLAG_UNALIGNED 0x0001 +#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. +#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. +#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. +/** + * The parent program guarantees that the input for B-frames containing + * streams is not written to for at least s->max_b_frames+1 frames, if + * this is not set the input will be copied. + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. +#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. +#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. +#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. +#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. +#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random + location instead of only at frame boundaries. */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization. +#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT. +#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay. +#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe. +#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT). +/* Fx : Flag for h263+ extra options */ +#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction +#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter +#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation +#define CODEC_FLAG_CLOSED_GOP 0x80000000 +#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks. +#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding. +#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata. +#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!! +#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS. + +#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries. +#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 0x0002 +#define CODEC_CAP_TRUNCATED 0x0008 +/* Codec can export data for HW decoding (XvMC). */ +#define CODEC_CAP_HWACCEL 0x0010 +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. 
The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY 0x0020 +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME 0x0040 +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). + */ +#define CODEC_CAP_HWACCEL_VDPAU 0x0080 +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carring such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES 0x0100 +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL 0x0200 +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF 0x0400 + +/** + * Codec is able to deal with negative linesizes + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 + +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS 0x1000 +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS 0x2000 +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE 0x4000 +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS 0x8000 +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000 +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define CODEC_CAP_LOSSLESS 0x80000000 + +//The following defines may change, don't expect compatibility if you use them. 
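For reference alongside the CODEC_CAP_DELAY documentation above, a drain loop in an application built against this libavcodec 55 snapshot could look roughly like the sketch below. The function name drain_delayed_frames is illustrative, avcodec_decode_video2() is assumed as the frame-decoding entry point of this version, and error handling is reduced to a bare minimum.

#include <libavcodec/avcodec.h>

/* Illustrative sketch: drain the remaining frames of a decoder whose codec
 * has CODEC_CAP_DELAY set by feeding empty packets, as documented above.
 * 'ctx' must be an opened decoder context, 'frame' an allocated AVFrame. */
static void drain_delayed_frames(AVCodecContext *ctx, AVFrame *frame)
{
    AVPacket pkt;
    int got_frame = 1;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* avpkt->data = NULL, avpkt->size = 0 requests flushing */
    pkt.size = 0;

    while (got_frame &&
           avcodec_decode_video2(ctx, frame, &got_frame, &pkt) >= 0) {
        if (got_frame) {
            /* a delayed frame is now available in 'frame'; consume it here */
        }
    }
}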
+#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan{ + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +}AVPanScan; + +#define FF_QSCALE_TYPE_MPEG1 0 +#define FF_QSCALE_TYPE_MPEG2 1 +#define FF_QSCALE_TYPE_H264 2 +#define FF_QSCALE_TYPE_VP56 3 + +#if FF_API_GET_BUFFER +#define FF_BUFFER_TYPE_INTERNAL 1 +#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user) +#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared. +#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything. + +#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore). +#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer. +#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content. +#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update). +#endif + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + AV_PKT_DATA_PALETTE, + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. 
+ * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES=70, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. + */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, +}; + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf or destruct (deprecated) + * fields. If either is set, the packet data is dynamically allocated and is + * valid indefinitely until av_free_packet() is called (which in turn calls + * av_buffer_unref()/the destruct callback to free the data). If neither is set, + * the packet data is typically backed by some static buffer somewhere and is + * only valid for a limited time (e.g. until the next read call when demuxing). + * + * The side data is always allocated with av_malloc() and is freed in + * av_free_packet(). + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. 
+ * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + struct { + uint8_t *data; + int size; + enum AVPacketSideDataType type; + } *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. + * Equals next_pts - this_pts in presentation order. + */ + int duration; +#if FF_API_DESTRUCT_PACKET + attribute_deprecated + void (*destruct)(struct AVPacket *); + attribute_deprecated + void *priv; +#endif + int64_t pos; ///< byte position in stream, -1 if unknown + + /** + * Time difference in AVStream->time_base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current packet. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * sizeof(AVCodecContext) must not be used outside libav*. 
+ */ +typedef struct AVCodecContext { + /** + * information on struct for av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; + char codec_name[32]; + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. + * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + + /** + * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * - encoding: unused + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int stream_codec_tag; + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream. + */ + int bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags; + + /** + * CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * mjpeg: Huffman tables + * rv10: additional flags + * mpeg4: global headers (they can be in the bitstream or here) + * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. 
+ */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * - encoding: MUST be set by user. + * - decoding: Set by libavcodec. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this is the number of "priming" samples added to the + * beginning of the stream. The decoded output will be delayed by this + * many samples relative to the input to the encoder. Note that this + * field is purely informational and does not directly affect the pts + * output by the encoder, which should always be based on the actual + * presentation time, including any delay. + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required. + */ + int coded_width, coded_height; + +#define FF_ASPECT_EXTENDED 15 + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec if known + */ + enum AVPixelFormat pix_fmt; + + /** + * Motion estimation algorithm used for video coding. + * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex), + * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific] + * - encoding: MUST be set by user. + * - decoding: unused + */ + int me_method; + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. 
You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. + * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + + /** obsolete FIXME remove */ + int rc_strategy; +#define FF_RC_STRATEGY_XVID 1 + + int b_frame_strategy; + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + + /** + * 0-> h263 quant 1-> mpeg quant + * - encoding: Set by user. + * - decoding: unused + */ + int mpeg_quant; + + /** + * qscale factor between P and I-frames + * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. 
+ * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + /** + * prediction method (needed for huffyuv) + * - encoding: Set by user. + * - decoding: unused + */ + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. + * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + + /** + * prepass for motion estimation + * - encoding: Set by user. + * - decoding: unused + */ + int pre_me; + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + + /** + * DTG active format information (additional aspect ratio + * information only used in DVB MPEG-2 transport streams) + * 0 if not set. + * + * - encoding: unused + * - decoding: Set by decoder. + */ + int dtg_active_format; +#define FF_DTG_AFD_SAME 8 +#define FF_DTG_AFD_4_3 9 +#define FF_DTG_AFD_16_9 10 +#define FF_DTG_AFD_14_9 11 +#define FF_DTG_AFD_4_3_SP_14_9 13 +#define FF_DTG_AFD_16_9_SP_14_9 14 +#define FF_DTG_AFD_SP_4_3 15 + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_range; + + /** + * intra quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int intra_quant_bias; +#define FF_DEFAULT_QUANT_BIAS 999999 + + /** + * inter quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int inter_quant_bias; + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. 
+ */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + + /** + * XVideo Motion Acceleration + * - encoding: forbidden + * - decoding: set by decoder + */ + int xvmc_acceleration; + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *inter_matrix; + + /** + * scene change detection threshold + * 0 is default, larger means fewer detected scene changes. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_threshold; + + /** + * noise reduction strength + * - encoding: Set by user. + * - decoding: unused + */ + int noise_reduction; + + /** + * Motion estimation threshold below which no motion estimation is + * performed, but instead the user specified motion vectors are used. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_threshold; + + /** + * Macroblock threshold below which the user specified macroblock types will be used. + * - encoding: Set by user. + * - decoding: unused + */ + int mb_threshold; + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: unused + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + + /** + * Border processing masking, raises the quantizer for mbs on the borders + * of the picture. + * - encoding: Set by user. + * - decoding: unused + */ + float border_masking; + + /** + * minimum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmax; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_penalty_compensation; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int brd_scale; + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. + */ + int refs; + + /** + * chroma qp offset from luma + * - encoding: Set by user. + * - decoding: unused + */ + int chromaoffset; + + /** + * Multiplied by qscale for each frame and added to scene_change_score. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_factor; + + /** + * + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. 
+ * - decoding: unused + */ + int mv0_threshold; + + /** + * Adjust sensitivity of b_frame_strategy 1. + * - encoding: Set by user. + * - decoding: unused + */ + int b_sensitivity; + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. + * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + +#if FF_API_REQUEST_CHANNELS + /** + * Decoder should decode to this many channels if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + * @deprecated Deprecated in favor of request_channel_layout. + */ + attribute_deprecated int request_channels; +#endif + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. 
+ */ + enum AVSampleFormat request_sample_fmt; + +#if FF_API_GET_BUFFER + /** + * Called at the beginning of each frame to get a buffer for it. + * + * The function will set AVFrame.data[], AVFrame.linesize[]. + * AVFrame.extended_data[] must also be set, but it should be the same as + * AVFrame.data[] except for planar audio with more channels than can fit + * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as + * many data pointers as it can hold. + * + * if CODEC_CAP_DR1 is not set then get_buffer() must call + * avcodec_default_get_buffer() instead of providing buffers allocated by + * some other means. + * + * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't + * need it. avcodec_default_get_buffer() aligns the output buffer properly, + * but if get_buffer() is overridden then alignment considerations should + * be taken into account. + * + * @see avcodec_default_get_buffer() + * + * Video: + * + * If pic.reference is set then the frame will be read later by libavcodec. + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * it may be called from a different thread, but not from more than one at + * once. Does not need to be reentrant. + * + * @see release_buffer(), reget_buffer() + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * Decoders cannot use the buffer after returning from + * avcodec_decode_audio4(), so they will not call release_buffer(), as it + * is assumed to be released immediately upon return. In some rare cases, + * a decoder may need to call get_buffer() more than once in a single + * call to avcodec_decode_audio4(). In that case, when get_buffer() is + * called again after it has already been called once, the previously + * acquired buffer is assumed to be released at that time and may not be + * reused by the decoder. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated use get_buffer2() + */ + attribute_deprecated + int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called to release buffers which were allocated with get_buffer. + * A released buffer can be reused in get_buffer(). + * pic.data[*] must be set to NULL. + * May be called from a different thread if frame multithreading is used, + * but not by more than one thread at once, so does not need to be reentrant. + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated custom freeing callbacks should be set from get_buffer2() + */ + attribute_deprecated + void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called at the beginning of a frame to get cr buffer for it. + * Buffer type (size, hints) must be the same. 
libavcodec won't check it. + * libavcodec will pass previous buffer in pic, function should return + * same buffer or new buffer with old frame "painted" into it. + * If pic.data[0] == NULL must behave like get_buffer(). + * if CODEC_CAP_DR1 is not set then reget_buffer() must call + * avcodec_default_reget_buffer() instead of providing buffers allocated by + * some other means. + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + attribute_deprecated + int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic); +#endif + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with av_malloc() and will be freed in + * av_frame_unref(). + * * otherwise exended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(), + * and av_buffer_ref(). + * - extended_buf and nb_extended_buf must be allocated with av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * av_frame_unref(). + * + * If CODEC_CAP_DR1 is not set then get_buffer2() must call + * avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. + * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an + * edge of the size returned by avcodec_get_edge_width() on all sides. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. 
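+ *
+ * As an illustration only (the callback name is arbitrary), a custom
+ * callback can delegate the allocation to the default one and merely
+ * annotate the returned frame:
+ * @code
+ * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     int ret = avcodec_default_get_buffer2(s, frame, flags);
+ *     if (ret >= 0)
+ *         frame->opaque = s->opaque; // tag the frame with caller data
+ *     return ret;
+ * }
+ * @endcode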
+ * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + + /** + * ratecontrol qmin qmax limiting method + * 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax. + * - encoding: Set by user. + * - decoding: unused + */ + float rc_qsquish; + + float rc_qmod_amp; + int rc_qmod_freq; + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + + /** + * rate control equation + * - encoding: Set by user + * - decoding: unused + */ + const char *rc_eq; + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_min_rate; + + float rc_buffer_aggressivity; + + /** + * initial complexity for pass1 ratecontrol + * - encoding: Set by user. + * - decoding: unused + */ + float rc_initial_cplx; + + /** + * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. 
+ * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 +#define FF_CODER_TYPE_DEFLATE 4 + /** + * coder type + * - encoding: Set by user. + * - decoding: unused + */ + int coder_type; + + /** + * context model + * - encoding: Set by user. + * - decoding: unused + */ + int context_model; + + /** + * minimum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmin; + + /** + * maximum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmax; + + /** + * frame skip threshold + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_threshold; + + /** + * frame skip factor + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_factor; + + /** + * frame skip exponent + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_exp; + + /** + * frame skip comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_cmp; + + /** + * trellis RD quantization + * - encoding: Set by user. + * - decoding: unused + */ + int trellis; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int min_prediction_order; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int max_prediction_order; + + /** + * GOP timecode frame start number + * - encoding: Set by user, in non drop frame format + * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset) + */ + int64_t timecode_frame_start; + + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. */ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); + + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ + + /* statistics, used for 2-pass encoding */ + int mv_bits; + int header_bits; + int i_tex_bits; + int p_tex_bits; + int i_count; + int p_count; + int skip_count; + int misc_bits; + + /** + * number of bits used for the previously encoded frame + * - encoding: Set by libavcodec. + * - decoding: unused + */ + int frame_bits; + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. + * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. + * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#define FF_BUG_OLD_MSMPEG4 2 +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. 
+#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 + + /** + * strictly follow the standard (MPEG4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#define FF_DEBUG_MV 32 +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#define FF_DEBUG_PTS 0x00000200 +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; +#define AV_EF_CRCCHECK (1<<0) ///< verify embedded CRCs +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliancies as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * @deprecated in favor of pkt_pts + * - encoding: unused + * - decoding: Set by user. 
+ */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#define FF_IDCT_SH4 9 +#define FF_IDCT_SIMPLEARM 10 +#define FF_IDCT_IPP 13 +#define FF_IDCT_XVIDMMX 14 +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#define FF_IDCT_SIMPLEVIS 18 +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#define FF_IDCT_SIMPLEALPHA 23 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + AVFrame *coded_frame; + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. 
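+ *
+ * For example (a sketch, assuming the client's callbacks really are thread
+ * safe), frame threading could be configured before avcodec_open2() with:
+ * @code
+ * avctx->thread_count          = 4;
+ * avctx->thread_type           = FF_THREAD_FRAME;
+ * avctx->thread_safe_callbacks = 1;
+ * @endcode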
+ */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. + * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * thread opaque + * Can be used by execute() to store some per AVCodecContext stuff. + * - encoding: set by execute() + * - decoding: set by execute() + */ + void *thread_opaque; + + /** + * noise vs. sse weight for the nsse comparsion function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
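+ *
+ * For example (sketch), the detected profile can be compared against the
+ * FF_PROFILE_* values below after avcodec_open2():
+ * @code
+ * if (avctx->profile == FF_PROFILE_H264_HIGH)
+ *     av_log(avctx, AV_LOG_INFO, "High profile H.264 stream\n");
+ * @endcode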
+ */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 +#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. 
+ * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + + /** + * Simulates errors in the bitstream to test error concealment. + * - encoding: Set by user. + * - decoding: unused + */ + int error_rate; + + /** + * Current packet as passed into the decoder, to avoid having + * to pass the packet into every function. Currently only valid + * inside lavc and get/release_buffer callbacks. + * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts + * - encoding: unused + */ + AVPacket *pkt; + + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + */ + uint64_t vbv_delay; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_pkt_timebase(avctx) + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_codec_descriptor(avctx) + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. + * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). 
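+ *
+ * For example (a sketch, assuming the field is exposed under the option
+ * name "skip_alpha"):
+ * @code
+ * av_opt_set_int(avctx, "skip_alpha", 1, 0);
+ * @endcode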
+ * Code outside libavcodec should access this field using AVOptions + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; +} AVCodecContext; + +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +int av_codec_get_lowres(const AVCodecContext *avctx); +void av_codec_set_lowres(AVCodecContext *avctx, int val); + +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. + * see CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 +#if FF_API_LOWRES + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres() +#endif + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. 
+ * If not defined, the next thread will start automatically; otherwise, the codec + * must call ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from avcodec_register(). + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. + * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); +} AVCodec; + +int av_codec_get_max_lowres(const AVCodec *codec); + +/** + * AVHWAccel. + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see FF_HWACCEL_CODEC_CAP_* + */ + int capabilities; + + struct AVHWAccel *next; + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. + * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. 
+ * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of HW accelerator private data. + * + * Private data is allocated with av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int priv_data_size; +} AVHWAccel; + +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + */ +typedef struct AVPicture { + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + + /** + * data+linesize for the bitmap of this subtitle. + * can be set for text/ass as well once they where rendered + */ + AVPicture pict; + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(const AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. 
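+ *
+ * For example (a minimal sketch), registering everything once at startup
+ * makes all compiled-in codecs visible to the lookup functions declared
+ * below:
+ * @code
+ * avcodec_register_all();
+ * AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * @endcode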
+ * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + + +#if FF_API_ALLOC_CONTEXT +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by simply calling av_free(). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + * + * @deprecated use avcodec_alloc_context3() + */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context(void); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context2(enum AVMediaType); + +/** + * Set the fields of the given AVCodecContext to default values. + * + * @param s The AVCodecContext of which the fields should be set to default values. + * @deprecated use avcodec_get_context_defaults3 + */ +attribute_deprecated +void avcodec_get_context_defaults(AVCodecContext *s); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); +#endif + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by calling avcodec_close() on it followed + * by av_free(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Set the fields of the given AVCodecContext to default values corresponding + * to the given codec (defaults may be codec-dependent). + * + * Do not call this function if a non-NULL codec has been passed + * to avcodec_alloc_context3() that allocated this AVCodecContext. + * If codec is non-NULL, it is illegal to call avcodec_open2() with a + * different codec on this AVCodecContext. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. 
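+ *
+ * For example (a sketch, where src is an already configured context):
+ * @code
+ * AVCodecContext *dst = avcodec_alloc_context3(NULL);
+ * if (avcodec_copy_context(dst, src) < 0 ||
+ *     avcodec_open2(dst, avcodec_find_decoder(src->codec_id), NULL) < 0)
+ *     exit(1);
+ * @endcode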
+ *
+ * @param dest target codec context, should be initialized with
+ *             avcodec_alloc_context3(NULL), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using avcodec_free_frame().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ * @see avcodec_get_frame_defaults
+ */
+AVFrame *avcodec_alloc_frame(void);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param frame The AVFrame of which the fields should be set to default values.
+ */
+void avcodec_get_frame_defaults(AVFrame *frame);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ *
+ * @warning this function does NOT free the data buffers themselves
+ * (it does not know how, since they might have been allocated with
+ *  a custom get_buffer()).
+ */
+void avcodec_free_frame(AVFrame **frame);
+
+#if FF_API_AVCODEC_OPEN
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated.
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open(context, codec) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context which will be set up to use the given codec.
+ * @param codec The codec to use within the context.
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close
+ *
+ * @deprecated use avcodec_open2
+ */
+attribute_deprecated
+int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+#endif
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, &opts) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ *              previously passed to avcodec_alloc_context3() or
+ *              avcodec_get_context_defaults3() for this context, then this
+ *              parameter MUST be either NULL or equal to the previously passed
+ *              codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ *                On return this object will be filled with options that were not found.
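+ *                A caller can, for example, warn about and release any
+ *                unconsumed entries afterwards (a sketch continuing the
+ *                example above):
+ * @code
+ * AVDictionaryEntry *t = NULL;
+ * while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)))
+ *     av_log(context, AV_LOG_WARNING, "Option %s not found.\n", t->key);
+ * av_dict_free(&opts);
+ * @endcode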
+ * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() / + * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will + * do nothing. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +#if FF_API_DESTRUCT_PACKET +/** + * Default packet destructor. + * @deprecated use the AVBuffer API instead + */ +attribute_deprecated +void av_destruct_packet(AVPacket *pkt); +#endif + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. + * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + */ +int av_dup_packet(AVPacket *pkt); + +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet(AVPacket *dst, AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet_side_data(AVPacket *dst, AVPacket *src); + +/** + * Free a packet. + * + * @param pkt packet to free + */ +void av_free_packet(AVPacket *pkt); + +/** + * Allocate new information of a packet. 
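+ *
+ * For example (a sketch; the side data type is arbitrary here), room for
+ * 16 bytes of new extradata can be attached to a packet with:
+ * @code
+ * uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, 16);
+ * if (sd)
+ *     memset(sd, 0, 16);
+ * @endcode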
+ * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +int av_packet_merge_side_data(AVPacket *pkt); + +int av_packet_split_side_data(AVPacket *pkt); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + * + */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder_by_name(const char *name); + +#if FF_API_GET_BUFFER +attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); +#endif + +/** + * The default callback for AVCodecContext.get_buffer2(). 
It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * CODEC_CAP_DR1 set. + */ +int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +/** + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. + * + * @return Required padding in pixels. + */ +unsigned avcodec_get_edge_width(void); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +#if FF_API_OLD_DECODE_AUDIO +/** + * Wrapper function which calls avcodec_decode_audio4. + * + * @deprecated Use avcodec_decode_audio4 instead. + * + * Decode the audio frame of size avpkt->size from avpkt->data into samples. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio3 has to be called again with an AVPacket that contains + * the remaining data in order to decode the second frame etc. + * If no frame + * could be outputted, frame_size_ptr is zero. Otherwise, it is the + * decompressed frame size in bytes. + * + * @warning You must set frame_size_ptr to the allocated size of the + * output buffer before calling avcodec_decode_audio3(). + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @warning You must not provide a custom get_buffer() when using + * avcodec_decode_audio3(). 
Doing so will override it with + * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, + * which does allow the application to provide a custom get_buffer(). + * + * @note You might have to align the input buffer avpkt->data and output buffer + * samples. The alignment requirements depend on the CPU: On some CPUs it isn't + * necessary at all, on others it won't work at all if not aligned and on others + * it will work but it will have an impact on performance. + * + * In practice, avpkt->data should have 4 byte alignment at minimum and + * samples should be 16 byte aligned unless the CPU doesn't need it + * (AltiVec and SSE do). + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] samples the output buffer, sample type in avctx->sample_fmt + * If the sample format is planar, each channel plane will + * be the same size, with no padding between channels. + * @param[in,out] frame_size_ptr the output buffer size in bytes + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields. + * All decoders are designed to use the least fields possible though. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame data was decompressed (used) from the input AVPacket. + */ +attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, + int *frame_size_ptr, + AVPacket *avpkt); +#endif + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. 
The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + */ +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + */ +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. 
+ * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @param avctx the codec context + * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be + freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. 
+ */ + int key_frame; + + /** + * Time difference in stream time base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current frame. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. + */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. 
+ */ + int output_picture_number; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +#if FF_API_OLD_ENCODE_AUDIO +/** + * Encode an audio frame from samples into buf. + * + * @deprecated Use avcodec_encode_audio2 instead. + * + * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. + * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user + * will know how much space is needed because it depends on the value passed + * in buf_size as described below. In that case a lower value can be used. + * + * @param avctx the codec context + * @param[out] buf the output buffer + * @param[in] buf_size the output buffer size + * @param[in] samples the input buffer containing the samples + * The number of samples read from this buffer is frame_size*channels, + * both of which are defined in avctx. + * For codecs which have avctx->frame_size equal to 0 (e.g. 
PCM) the number of + * samples read from samples is equal to: + * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) + * This also implies that av_get_bits_per_sample() must not return 0 for these + * codecs. + * @return On error a negative value is returned, on success zero or the number + * of bytes used to encode the data read from the input buffer. + */ +int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, + uint8_t *buf, int buf_size, + const short *samples); +#endif + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +#if FF_API_OLD_ENCODE_VIDEO +/** + * @deprecated use avcodec_encode_video2() instead. + * + * Encode a video frame from pict into buf. + * The input picture should be + * stored using a specific format, namely avctx.pix_fmt. + * + * @param avctx the codec context + * @param[out] buf the output buffer for the bitstream of encoded frame + * @param[in] buf_size the size of the output buffer in bytes + * @param[in] pict the input picture to encode + * @return On error a negative value is returned, on success zero or the number + * of bytes used from the output buffer. + */ +attribute_deprecated +int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVFrame *pict); +#endif + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. 
+ * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVCODEC_RESAMPLE +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * @deprecated use libswresample instead + * + * @{ + */ +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. + * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +attribute_deprecated +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +attribute_deprecated +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +attribute_deprecated +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. 
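+ *
+ * A minimal sketch of the intended calling pattern (rates, filter length and
+ * the dst/src/src_len/dst_len buffers are illustrative placeholders; new code
+ * should prefer libswresample, as noted above):
+ * @code
+ * struct AVResampleContext *rc = av_resample_init(44100, 48000, 16, 10, 0, 0.8);
+ * int consumed = 0;
+ * int out_len  = av_resample(rc, dst, src, &consumed, src_len, dst_len, 1);
+ * av_resample_close(rc);
+ * @endcode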
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +attribute_deprecated +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +attribute_deprecated +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +attribute_deprecated +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +attribute_deprecated +void av_resample_close(struct AVResampleContext *c); + +/** + * @} + */ +#endif + +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * Allocate memory for the pixels of a picture and setup the AVPicture + * fields for it. + * + * Call avpicture_free() to free it. + * + * @param picture the picture structure to be filled in + * @param pix_fmt the pixel format of the picture + * @param width the width of the picture + * @param height the height of the picture + * @return zero if successful, a negative error code otherwise + * + * @see av_image_alloc(), avpicture_fill() + */ +int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Free a picture previously allocated by avpicture_alloc(). + * The data buffer used by the AVPicture is freed, but the AVPicture structure + * itself is not. + * + * @param picture the AVPicture to be freed + */ +void avpicture_free(AVPicture *picture); + +/** + * Setup the picture fields based on the specified image parameters + * and the provided image data buffer. + * + * The picture fields are filled in by using the image data buffer + * pointed to by ptr. + * + * If ptr is NULL, the function will fill only the picture linesize + * array and return the required size for the image buffer. + * + * To allocate an image buffer and fill the picture data in one call, + * use avpicture_alloc(). 
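+ *
+ * A minimal sketch of the allocate-and-fill pattern described above (the
+ * pixel format and dimensions are arbitrary examples):
+ * @code
+ * AVPicture pic;
+ * int size = avpicture_get_size(AV_PIX_FMT_YUV420P, 1280, 720);
+ * uint8_t *buf = av_malloc(size);
+ * if (buf)
+ *     avpicture_fill(&pic, buf, AV_PIX_FMT_YUV420P, 1280, 720);
+ * @endcode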
+ * + * @param picture the picture to be filled in + * @param ptr buffer where the image data is stored, or NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return the size in bytes required for src, a negative error code + * in case of failure + * + * @see av_image_fill_arrays() + */ +int avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy pixel data from an AVPicture into a buffer. + * + * avpicture_get_size() can be used to compute the required size for + * the buffer to fill. + * + * @param src source picture with filled data + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @param dest destination buffer + * @param dest_size destination buffer size in bytes + * @return the number of bytes written to dest, or a negative value + * (error code) on error, for example if the destination buffer is not + * big enough + * + * @see av_image_copy_to_buffer() + */ +int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * Calculate the size in bytes that a picture of the given width and height + * would occupy if stored in the given picture format. + * + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @return the computed picture buffer size or a negative error code + * in case of error + * + * @see av_image_get_buffer_size(). + */ +int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +#if FF_API_DEINTERLACE +/** + * deinterlace - if not supported return -1 + * + * @deprecated - use yadif (in libavfilter) instead + */ +attribute_deprecated +int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); +#endif +/** + * Copy image src to dst. Wraps av_image_copy(). + */ +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Crop image top and left side. + */ +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * Pad image. + */ +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample + * for one that returns a failure code and continues in case of invalid + * pix_fmts. 
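+ *
+ * For example (a sketch; the shift values shown hold for 4:2:0 formats):
+ * @code
+ * int h_shift, v_shift;
+ * avcodec_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
+ * // h_shift == 1 and v_shift == 1 for AV_PIX_FMT_YUV420P
+ * @endcode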
+ * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @see av_pix_fmt_get_chroma_sub_sample + */ + +void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. + */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * avcodec_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format and a selection of two destination pixel formats. When converting from + * one pixel format to another, information loss may occur. For example, when converting + * from RGB24 to GRAY, the color information will be lost. 
Similarly, other losses occur when + * converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() selects which of + * the given pixel formats should be used to suffer the least amount of loss. + * + * If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be + * returned. + * + * @code + * src_pix_fmt = AV_PIX_FMT_YUV420P; + * dst_pix_fmt1= AV_PIX_FMT_RGB24; + * dst_pix_fmt2= AV_PIX_FMT_GRAY8; + * dst_pix_fmt3= AV_PIX_FMT_RGB8; + * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored. + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss); + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss); + * @endcode + * + * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from + * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from + * @param[in] src_pix_fmt Source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e. + * NULL or value of zero means we care about all losses. Out: the loss + * that occurs when converting from src to selected dst pixel format. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); +#else +enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); +#endif + + +enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); + +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf_size size in bytes of buf + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + */ +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. 
+ * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + + +typedef struct AVBitStreamFilterContext { + void *priv_data; + struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; +} AVBitStreamFilterContext; + + +typedef struct AVBitStreamFilter { + const char *name; + int priv_data_size; + int (*filter)(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + void (*close)(AVBitStreamFilterContext *bsfc); + struct AVBitStreamFilter *next; +} AVBitStreamFilter; + +/** + * Register a bitstream filter. + * + * The filter will be accessible to the application code through + * av_bitstream_filter_next() or can be directly initialized with + * av_bitstream_filter_init(). + * + * @see avcodec_register_all() + */ +void av_register_bitstream_filter(AVBitStreamFilter *bsf); + +/** + * Create and initialize a bitstream filter context given a bitstream + * filter name. + * + * The returned context must be freed with av_bitstream_filter_close(). 
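+ *
+ * Sketch of the usual lifecycle (the filter name is only an example and must
+ * match a filter registered in the running build):
+ * @code
+ * AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ * if (bsfc) {
+ *     // ... run av_bitstream_filter_filter() on each packet ...
+ *     av_bitstream_filter_close(bsfc);
+ * }
+ * @endcode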
+ * + * @param name the name of the bitstream filter + * @return a bitstream filter context if a matching filter was found + * and successfully initialized, NULL otherwise + */ +AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); + +/** + * Filter bitstream. + * + * This function filters the buffer buf with size buf_size, and places the + * filtered buffer in the buffer pointed to by poutbuf. + * + * The output buffer must be freed by the caller. + * + * @param bsfc bitstream filter context created by av_bitstream_filter_init() + * @param avctx AVCodecContext accessed by the filter, may be NULL. + * If specified, this must point to the encoder context of the + * output stream the packet is sent to. + * @param args arguments which specify the filter configuration, may be NULL + * @param poutbuf pointer which is updated to point to the filtered buffer + * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes + * @param buf buffer containing the data to filter + * @param buf_size size in bytes of buf + * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data + * @return >= 0 in case of success, or a negative error code in case of failure + * + * If the return value is positive, an output buffer is allocated and + * is availble in *poutbuf, and is distinct from the input buffer. + * + * If the return value is 0, the output buffer is not allocated and + * should be considered identical to the input buffer, or in case + * *poutbuf was set it points to the input buffer (not necessarily to + * its starting address). + */ +int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + +/** + * Release bitstream filter context. + * + * @param bsf the bitstream filter context created with + * av_bitstream_filter_init(), can be NULL + */ +void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); + +/** + * If f is NULL, return the first registered bitstream filter, + * if f is non-NULL, return the next registered bitstream filter + * after f, or NULL if f is the last one. + * + * This function can be used to iterate over all registered bitstream + * filters. + */ +AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); + +/* memory */ + +/** + * Reallocate the given block if it is not large enough, otherwise do nothing. + * + * @see av_realloc + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_malloc but the buffer has additional + * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. 
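+ *
+ * Typical calling pattern, sketched (the buffer, size and needed_size names
+ * are placeholders):
+ * @code
+ * uint8_t *buf = NULL;
+ * unsigned int buf_size = 0;
+ * av_fast_padded_malloc(&buf, &buf_size, needed_size);
+ * if (!buf)
+ *     return AVERROR(ENOMEM);
+ * @endcode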
+ */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_MISSING_SAMPLE +/** + * Log a generic warning message about a missing feature. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] feature string containing the name of the missing feature + * @param[in] want_sample indicates if samples are wanted which exhibit this feature. + * If want_sample is non-zero, additional verbage will be added to the log + * message which tells the user how to report samples to the development + * mailing list. + * @deprecated Use avpriv_report_missing_feature() instead. + */ +attribute_deprecated +void av_log_missing_feature(void *avc, const char *feature, int want_sample); + +/** + * Log a generic warning message asking for a sample. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] msg string containing an optional message, or NULL if no message + * @deprecated Use avpriv_request_sample() instead. + */ +attribute_deprecated +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); +#endif /* FF_API_MISSING_SAMPLE */ + +/** + * Register the hardware accelerator hwaccel. + */ +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. + */ +AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel); + + +/** + * Lock operation used by lockmgr + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. mutex points to a (void *) where the + * lockmgr should store/get a pointer to a user allocated mutex. It's + * NULL upon AV_LOCK_CREATE and != NULL for all other ops. + * + * @param cb User defined callback. Note: FFmpeg may invoke calls to this + * callback during the call to av_lockmgr_register(). + * Thus, the application must be prepared to handle that. + * If cb is set to NULL the lockmgr will be unregistered. + * Also note that during unregistration the previously registered + * lockmgr callback may also be invoked. + */ +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); + +/** + * Get the type of the given codec. + */ +enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. 
+ * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avfft.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avfft.h new file mode 100644 index 0000000..2d20a45 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/avfft.h @@ -0,0 +1,116 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling ff_fft_calc(). + */ +void av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
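+ *
+ * A minimal forward-transform sketch (the transform size of 1 << 10 is just
+ * an illustration):
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);
+ * FFTComplex samples[1 << 10];
+ * // ... fill samples with input data ...
+ * av_fft_permute(fft, samples);
+ * av_fft_calc(fft, samples);
+ * av_fft_end(fft);
+ * @endcode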
+ */ +void av_fft_calc(FFTContext *s, FFTComplex *z); + +void av_fft_end(FFTContext *s); + +FFTContext *av_mdct_init(int nbits, int inverse, double scale); +void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. + * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); +void av_rdft_calc(RDFTContext *s, FFTSample *data); +void av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *av_dct_init(int nbits, enum DCTTransformType type); +void av_dct_calc(DCTContext *s, FFTSample *data); +void av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/dxva2.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/dxva2.h new file mode 100644 index 0000000..ac39e06 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/dxva2.h @@ -0,0 +1,95 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA_H +#define AVCODEC_DXVA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. + */ + +#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0600 +#undef _WIN32_WINNT +#endif + +#if !defined(_WIN32_WINNT) +#define _WIN32_WINNT 0x0600 +#endif + +#include +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
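+ *
+ * Sketch of the expected wiring (the decoder, config, surfaces and
+ * nb_surfaces variables are placeholders for objects created by the
+ * application's own DXVA2 setup):
+ * @code
+ * struct dxva_context dxva = { 0 };
+ * dxva.decoder       = decoder;
+ * dxva.cfg           = &config;
+ * dxva.surface       = surfaces;
+ * dxva.surface_count = nb_surfaces;
+ * avctx->hwaccel_context = &dxva;
+ * @endcode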
+ */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/old_codec_ids.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/old_codec_ids.h new file mode 100644 index 0000000..d8a8f74 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/old_codec_ids.h @@ -0,0 +1,397 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_OLD_CODEC_IDS_H +#define AVCODEC_OLD_CODEC_IDS_H + +#include "libavutil/common.h" + +/* + * This header exists to prevent new codec IDs from being accidentally added to + * the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVCodecID enum instead. 
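+ *
+ * For example, new code should resolve codecs through AVCodecID values, e.g.:
+ * @code
+ * AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * @endcode
+ * rather than through the deprecated CODEC_ID_* aliases below.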
+ */ + + CODEC_ID_NONE = AV_CODEC_ID_NONE, + + /* video codecs */ + CODEC_ID_MPEG1VIDEO, + CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + CODEC_ID_MPEG2VIDEO_XVMC, + CODEC_ID_H261, + CODEC_ID_H263, + CODEC_ID_RV10, + CODEC_ID_RV20, + CODEC_ID_MJPEG, + CODEC_ID_MJPEGB, + CODEC_ID_LJPEG, + CODEC_ID_SP5X, + CODEC_ID_JPEGLS, + CODEC_ID_MPEG4, + CODEC_ID_RAWVIDEO, + CODEC_ID_MSMPEG4V1, + CODEC_ID_MSMPEG4V2, + CODEC_ID_MSMPEG4V3, + CODEC_ID_WMV1, + CODEC_ID_WMV2, + CODEC_ID_H263P, + CODEC_ID_H263I, + CODEC_ID_FLV1, + CODEC_ID_SVQ1, + CODEC_ID_SVQ3, + CODEC_ID_DVVIDEO, + CODEC_ID_HUFFYUV, + CODEC_ID_CYUV, + CODEC_ID_H264, + CODEC_ID_INDEO3, + CODEC_ID_VP3, + CODEC_ID_THEORA, + CODEC_ID_ASV1, + CODEC_ID_ASV2, + CODEC_ID_FFV1, + CODEC_ID_4XM, + CODEC_ID_VCR1, + CODEC_ID_CLJR, + CODEC_ID_MDEC, + CODEC_ID_ROQ, + CODEC_ID_INTERPLAY_VIDEO, + CODEC_ID_XAN_WC3, + CODEC_ID_XAN_WC4, + CODEC_ID_RPZA, + CODEC_ID_CINEPAK, + CODEC_ID_WS_VQA, + CODEC_ID_MSRLE, + CODEC_ID_MSVIDEO1, + CODEC_ID_IDCIN, + CODEC_ID_8BPS, + CODEC_ID_SMC, + CODEC_ID_FLIC, + CODEC_ID_TRUEMOTION1, + CODEC_ID_VMDVIDEO, + CODEC_ID_MSZH, + CODEC_ID_ZLIB, + CODEC_ID_QTRLE, + CODEC_ID_TSCC, + CODEC_ID_ULTI, + CODEC_ID_QDRAW, + CODEC_ID_VIXL, + CODEC_ID_QPEG, + CODEC_ID_PNG, + CODEC_ID_PPM, + CODEC_ID_PBM, + CODEC_ID_PGM, + CODEC_ID_PGMYUV, + CODEC_ID_PAM, + CODEC_ID_FFVHUFF, + CODEC_ID_RV30, + CODEC_ID_RV40, + CODEC_ID_VC1, + CODEC_ID_WMV3, + CODEC_ID_LOCO, + CODEC_ID_WNV1, + CODEC_ID_AASC, + CODEC_ID_INDEO2, + CODEC_ID_FRAPS, + CODEC_ID_TRUEMOTION2, + CODEC_ID_BMP, + CODEC_ID_CSCD, + CODEC_ID_MMVIDEO, + CODEC_ID_ZMBV, + CODEC_ID_AVS, + CODEC_ID_SMACKVIDEO, + CODEC_ID_NUV, + CODEC_ID_KMVC, + CODEC_ID_FLASHSV, + CODEC_ID_CAVS, + CODEC_ID_JPEG2000, + CODEC_ID_VMNC, + CODEC_ID_VP5, + CODEC_ID_VP6, + CODEC_ID_VP6F, + CODEC_ID_TARGA, + CODEC_ID_DSICINVIDEO, + CODEC_ID_TIERTEXSEQVIDEO, + CODEC_ID_TIFF, + CODEC_ID_GIF, + CODEC_ID_DXA, + CODEC_ID_DNXHD, + CODEC_ID_THP, + CODEC_ID_SGI, + CODEC_ID_C93, + CODEC_ID_BETHSOFTVID, + CODEC_ID_PTX, + CODEC_ID_TXD, + CODEC_ID_VP6A, + CODEC_ID_AMV, + CODEC_ID_VB, + CODEC_ID_PCX, + CODEC_ID_SUNRAST, + CODEC_ID_INDEO4, + CODEC_ID_INDEO5, + CODEC_ID_MIMIC, + CODEC_ID_RL2, + CODEC_ID_ESCAPE124, + CODEC_ID_DIRAC, + CODEC_ID_BFI, + CODEC_ID_CMV, + CODEC_ID_MOTIONPIXELS, + CODEC_ID_TGV, + CODEC_ID_TGQ, + CODEC_ID_TQI, + CODEC_ID_AURA, + CODEC_ID_AURA2, + CODEC_ID_V210X, + CODEC_ID_TMV, + CODEC_ID_V210, + CODEC_ID_DPX, + CODEC_ID_MAD, + CODEC_ID_FRWU, + CODEC_ID_FLASHSV2, + CODEC_ID_CDGRAPHICS, + CODEC_ID_R210, + CODEC_ID_ANM, + CODEC_ID_BINKVIDEO, + CODEC_ID_IFF_ILBM, + CODEC_ID_IFF_BYTERUN1, + CODEC_ID_KGV1, + CODEC_ID_YOP, + CODEC_ID_VP8, + CODEC_ID_PICTOR, + CODEC_ID_ANSI, + CODEC_ID_A64_MULTI, + CODEC_ID_A64_MULTI5, + CODEC_ID_R10K, + CODEC_ID_MXPEG, + CODEC_ID_LAGARITH, + CODEC_ID_PRORES, + CODEC_ID_JV, + CODEC_ID_DFA, + CODEC_ID_WMV3IMAGE, + CODEC_ID_VC1IMAGE, + CODEC_ID_UTVIDEO, + CODEC_ID_BMV_VIDEO, + CODEC_ID_VBLE, + CODEC_ID_DXTORY, + CODEC_ID_V410, + CODEC_ID_XWD, + CODEC_ID_CDXL, + CODEC_ID_XBM, + CODEC_ID_ZEROCODEC, + CODEC_ID_MSS1, + CODEC_ID_MSA1, + CODEC_ID_TSCC2, + CODEC_ID_MTS2, + CODEC_ID_CLLC, + CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + CODEC_ID_EXR = MKBETAG('0','E','X','R'), + CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + CODEC_ID_V308 = MKBETAG('V','3','0','8'), + CODEC_ID_V408 = 
MKBETAG('V','4','0','8'), + CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + CODEC_ID_SANM = MKBETAG('S','A','N','M'), + CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + CODEC_ID_SNOW = AV_CODEC_ID_SNOW, + + /* various PCM "codecs" */ + CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + CODEC_ID_PCM_S16LE = 0x10000, + CODEC_ID_PCM_S16BE, + CODEC_ID_PCM_U16LE, + CODEC_ID_PCM_U16BE, + CODEC_ID_PCM_S8, + CODEC_ID_PCM_U8, + CODEC_ID_PCM_MULAW, + CODEC_ID_PCM_ALAW, + CODEC_ID_PCM_S32LE, + CODEC_ID_PCM_S32BE, + CODEC_ID_PCM_U32LE, + CODEC_ID_PCM_U32BE, + CODEC_ID_PCM_S24LE, + CODEC_ID_PCM_S24BE, + CODEC_ID_PCM_U24LE, + CODEC_ID_PCM_U24BE, + CODEC_ID_PCM_S24DAUD, + CODEC_ID_PCM_ZORK, + CODEC_ID_PCM_S16LE_PLANAR, + CODEC_ID_PCM_DVD, + CODEC_ID_PCM_F32BE, + CODEC_ID_PCM_F32LE, + CODEC_ID_PCM_F64BE, + CODEC_ID_PCM_F64LE, + CODEC_ID_PCM_BLURAY, + CODEC_ID_PCM_LXF, + CODEC_ID_S302M, + CODEC_ID_PCM_S8_PLANAR, + + /* various ADPCM codecs */ + CODEC_ID_ADPCM_IMA_QT = 0x11000, + CODEC_ID_ADPCM_IMA_WAV, + CODEC_ID_ADPCM_IMA_DK3, + CODEC_ID_ADPCM_IMA_DK4, + CODEC_ID_ADPCM_IMA_WS, + CODEC_ID_ADPCM_IMA_SMJPEG, + CODEC_ID_ADPCM_MS, + CODEC_ID_ADPCM_4XM, + CODEC_ID_ADPCM_XA, + CODEC_ID_ADPCM_ADX, + CODEC_ID_ADPCM_EA, + CODEC_ID_ADPCM_G726, + CODEC_ID_ADPCM_CT, + CODEC_ID_ADPCM_SWF, + CODEC_ID_ADPCM_YAMAHA, + CODEC_ID_ADPCM_SBPRO_4, + CODEC_ID_ADPCM_SBPRO_3, + CODEC_ID_ADPCM_SBPRO_2, + CODEC_ID_ADPCM_THP, + CODEC_ID_ADPCM_IMA_AMV, + CODEC_ID_ADPCM_EA_R1, + CODEC_ID_ADPCM_EA_R3, + CODEC_ID_ADPCM_EA_R2, + CODEC_ID_ADPCM_IMA_EA_SEAD, + CODEC_ID_ADPCM_IMA_EA_EACS, + CODEC_ID_ADPCM_EA_XAS, + CODEC_ID_ADPCM_EA_MAXIS_XA, + CODEC_ID_ADPCM_IMA_ISS, + CODEC_ID_ADPCM_G722, + CODEC_ID_ADPCM_IMA_APC, + CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + + /* AMR */ + CODEC_ID_AMR_NB = 0x12000, + CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + CODEC_ID_RA_144 = 0x13000, + CODEC_ID_RA_288, + + /* various DPCM codecs */ + CODEC_ID_ROQ_DPCM = 0x14000, + CODEC_ID_INTERPLAY_DPCM, + CODEC_ID_XAN_DPCM, + CODEC_ID_SOL_DPCM, + + /* audio codecs */ + CODEC_ID_MP2 = 0x15000, + CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + CODEC_ID_AAC, + CODEC_ID_AC3, + CODEC_ID_DTS, + CODEC_ID_VORBIS, + CODEC_ID_DVAUDIO, + CODEC_ID_WMAV1, + CODEC_ID_WMAV2, + CODEC_ID_MACE3, + CODEC_ID_MACE6, + CODEC_ID_VMDAUDIO, + CODEC_ID_FLAC, + CODEC_ID_MP3ADU, + CODEC_ID_MP3ON4, + CODEC_ID_SHORTEN, + CODEC_ID_ALAC, + CODEC_ID_WESTWOOD_SND1, + CODEC_ID_GSM, ///< as in Berlin toast format + CODEC_ID_QDM2, + CODEC_ID_COOK, + CODEC_ID_TRUESPEECH, + CODEC_ID_TTA, + CODEC_ID_SMACKAUDIO, + CODEC_ID_QCELP, + CODEC_ID_WAVPACK, + CODEC_ID_DSICINAUDIO, + CODEC_ID_IMC, + CODEC_ID_MUSEPACK7, + CODEC_ID_MLP, + CODEC_ID_GSM_MS, /* as found in WAV */ + CODEC_ID_ATRAC3, + CODEC_ID_VOXWARE, + CODEC_ID_APE, + CODEC_ID_NELLYMOSER, + CODEC_ID_MUSEPACK8, + CODEC_ID_SPEEX, + CODEC_ID_WMAVOICE, + CODEC_ID_WMAPRO, + CODEC_ID_WMALOSSLESS, + CODEC_ID_ATRAC3P, + CODEC_ID_EAC3, + CODEC_ID_SIPR, + CODEC_ID_MP1, + CODEC_ID_TWINVQ, + CODEC_ID_TRUEHD, + CODEC_ID_MP4ALS, + CODEC_ID_ATRAC1, + CODEC_ID_BINKAUDIO_RDFT, + CODEC_ID_BINKAUDIO_DCT, + CODEC_ID_AAC_LATM, + CODEC_ID_QDMC, + CODEC_ID_CELT, + CODEC_ID_G723_1, + CODEC_ID_G729, + CODEC_ID_8SVX_EXP, + CODEC_ID_8SVX_FIB, + CODEC_ID_BMV_AUDIO, + CODEC_ID_RALF, + CODEC_ID_IAC, + CODEC_ID_ILBC, + CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + CODEC_ID_OPUS = 
MKBETAG('O','P','U','S'), + + /* subtitle codecs */ + CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + CODEC_ID_DVD_SUBTITLE = 0x17000, + CODEC_ID_DVB_SUBTITLE, + CODEC_ID_TEXT, ///< raw UTF-8 text + CODEC_ID_XSUB, + CODEC_ID_SSA, + CODEC_ID_MOV_TEXT, + CODEC_ID_HDMV_PGS_SUBTITLE, + CODEC_ID_DVB_TELETEXT, + CODEC_ID_SRT, + CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + + /* other specific kind of codecs (generally used for attachments) */ + CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + CODEC_ID_TTF = 0x18000, + CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + + CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it + + CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#endif /* AVCODEC_OLD_CODEC_IDS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vaapi.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vaapi.h new file mode 100644 index 0000000..815a27e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vaapi.h @@ -0,0 +1,173 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. + * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. 
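+ *
+ * A minimal setup sketch; the VADisplay and the config/context IDs are
+ * assumed to come from the application's own VA-API calls (vaInitialize(),
+ * vaCreateConfig(), vaCreateContext()), and avctx is the decoder's
+ * AVCodecContext:
+ * @code
+ * struct vaapi_context *va = av_mallocz(sizeof(*va));
+ * va->display    = va_display;     // VADisplay
+ * va->config_id  = va_config_id;   // VAConfigID
+ * va->context_id = va_context_id;  // VAContextID
+ * avctx->hwaccel_context = va;
+ * @endcode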
+ */ +struct vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; + + /** + * VAPictureParameterBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t pic_param_buf_id; + + /** + * VAIQMatrixBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t iq_matrix_buf_id; + + /** + * VABitPlaneBuffer ID (for VC-1 decoding) + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t bitplane_buf_id; + + /** + * Slice parameter/data buffer IDs + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t *slice_buf_ids; + + /** + * Number of effective slice buffer IDs to send to the HW + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int n_slice_buf_ids; + + /** + * Size of pre-allocated slice_buf_ids + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_buf_ids_alloc; + + /** + * Pointer to VASliceParameterBuffers + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + void *slice_params; + + /** + * Size of a VASliceParameterBuffer element + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_param_size; + + /** + * Size of pre-allocated slice_params + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_params_alloc; + + /** + * Number of slices currently filled in + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_count; + + /** + * Pointer to slice data buffer base + * - encoding: unused + * - decoding: Set by libavcodec + */ + const uint8_t *slice_data; + + /** + * Current size of slice data + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t slice_data_size; +}; + +/* @} */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vda.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vda.h new file mode 100644 index 0000000..b3d6399 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vda.h @@ -0,0 +1,162 @@ +/* + * VDA HW acceleration + * + * copyright (c) 2011 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDA_H +#define AVCODEC_VDA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. 
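+ *
+ * A rough usage sketch, with error handling omitted; the 'avc1' stream format
+ * and the pixel format are illustrative assumptions, and avctx is the
+ * decoder's AVCodecContext:
+ * @code
+ * struct vda_context *vda = av_mallocz(sizeof(*vda));
+ * vda->width           = avctx->width;
+ * vda->height          = avctx->height;
+ * vda->format          = 'avc1';
+ * vda->cv_pix_fmt_type = kCVPixelFormatType_422YpCbCr8;
+ * ff_vda_create_decoder(vda, avctx->extradata, avctx->extradata_size);
+ * avctx->hwaccel_context = vda;
+ * @endcode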
+ */ + +#include + +// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes +// http://openradar.appspot.com/8026390 +#undef __GNUC_STDC_INLINE__ + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "libavcodec/version.h" + +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +/** + * This structure is used to provide the necessary configurations and data + * to the VDA FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct vda_context { + /** + * VDA decoder object. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + VDADecoder decoder; + + /** + * The Core Video pixel buffer that contains the current image data. + * + * encoding: unused + * decoding: Set by libavcodec. Unset by user. + */ + CVPixelBufferRef cv_buffer; + + /** + * Use the hardware decoder in synchronous mode. + * + * encoding: unused + * decoding: Set by user. + */ + int use_sync_decoding; + + /** + * The frame width. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int width; + + /** + * The frame height. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int height; + + /** + * The frame format. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int format; + + /** + * The pixel format for output image buffers. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + OSType cv_pix_fmt_type; + + /** + * The current bitstream buffer. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + uint8_t *priv_bitstream; + + /** + * The current size of the bitstream. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_bitstream_size; + + /** + * The reference size used for fast reallocation. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_allocated_size; + + /** + * Use av_buffer to manage buffer. + * When the flag is set, the CVPixelBuffers returned by the decoder will + * be released automatically, so you have to retain them if necessary. + * Not setting this flag may cause memory leak. + * + * encoding: unused + * decoding: Set by user. + */ + int use_ref_buffer; +}; + +/** Create the video decoder. */ +int ff_vda_create_decoder(struct vda_context *vda_ctx, + uint8_t *extradata, + int extradata_size); + +/** Destroy the video decoder. */ +int ff_vda_destroy_decoder(struct vda_context *vda_ctx); + +/** + * @} + */ + +#endif /* AVCODEC_VDA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vdpau.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vdpau.h new file mode 100644 index 0000000..b1c836c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/vdpau.h @@ -0,0 +1,195 @@ +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include +#include +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU 1 +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU 1 +#endif + +#if FF_API_BUFS_VDPAU +union AVVDPAUPictureInfo { + VdpPictureInfoH264 h264; + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoVC1 vc1; + VdpPictureInfoMPEG4Part2 mpeg4; +}; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + +#if FF_API_BUFS_VDPAU + /** + * VDPAU picture information + * + * Set by libavcodec. + */ + attribute_deprecated + union AVVDPAUPictureInfo info; + + /** + * Allocated size of the bitstream_buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_allocated; + + /** + * Useful bitstream buffers in the bitstream buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_used; + + /** + * Table of bitstream buffers. + * The user is responsible for freeing this buffer using av_freep(). + * + * Set by libavcodec. + */ + attribute_deprecated + VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +#if FF_API_CAP_VDPAU +/** @brief The videoSurface is used for rendering. */ +#define FF_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * @brief The videoSurface is needed for reference/prediction. + * The codec manipulates this. 
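+ *
+ * A presentation module would typically check these flags before recycling a
+ * surface; a sketch, where rs is a struct vdpau_render_state pointer and
+ * reuse_surface() is a hypothetical helper:
+ * @code
+ * if (!(rs->state & (FF_VDPAU_STATE_USED_FOR_RENDER |
+ *                    FF_VDPAU_STATE_USED_FOR_REFERENCE)))
+ *     reuse_surface(rs->surface);
+ * @endcode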
+ */ +#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 + +/** + * @brief This structure is used as a callback between the FFmpeg + * decoder (vd_) and presentation (vo_) module. + * This is used for defining a video frame containing surface, + * picture parameter, bitstream information etc which are passed + * between the FFmpeg decoder and its clients. + */ +struct vdpau_render_state { + VdpVideoSurface surface; ///< Used as rendered surface, never changed. + + int state; ///< Holds FF_VDPAU_STATE_* values. + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif + + /** Describe size/location of the compressed video data. + Set to 0 when freeing bitstream_buffers. */ + int bitstream_buffers_allocated; + int bitstream_buffers_used; + /** The user is responsible for freeing this buffer using av_freep(). */ + VdpBitstreamBuffer *bitstream_buffers; + +#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif +}; +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/version.h new file mode 100644 index 0000000..63f6346 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/version.h @@ -0,0 +1,104 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "libavutil/avutil.h" + +#define LIBAVCODEC_VERSION_MAJOR 55 +#define LIBAVCODEC_VERSION_MINOR 39 +#define LIBAVCODEC_VERSION_MICRO 101 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
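+ *
+ * Applications normally key their own compatibility code off the version
+ * macros rather than off FF_API_*; for example:
+ * @code
+ * #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 0, 0)
+ *     // pre-55 fallback path goes here
+ * #endif
+ * @endcode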
+ */ + +#ifndef FF_API_REQUEST_CHANNELS +#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ALLOC_CONTEXT +#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_AVCODEC_OPEN +#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_OLD_DECODE_AUDIO +#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_TIMECODE +#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55) +#endif + +#ifndef FF_API_OLD_ENCODE_AUDIO +#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_ENCODE_VIDEO +#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CODEC_ID +#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_AVCODEC_RESAMPLE +#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DEINTERLACE +#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DESTRUCT_PACKET +#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_GET_BUFFER +#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_MISSING_SAMPLE +#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif + +#endif /* AVCODEC_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/xvmc.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/xvmc.h new file mode 100644 index 0000000..b2bf518 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavcodec/xvmc.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include + +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. + - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). 
+ A successful ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/avdevice.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/avdevice.h new file mode 100644 index 0000000..93a044f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/avdevice.h @@ -0,0 +1,69 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_AVDEVICE_H +#define AVDEVICE_AVDEVICE_H + +#include "version.h" + +/** + * @file + * @ingroup lavd + * Main libavdevice API header + */ + +/** + * @defgroup lavd Special devices muxing/demuxing library + * @{ + * Libavdevice is a complementary library to @ref libavf "libavformat". It + * provides various "special" platform-specific muxers and demuxers, e.g. for + * grabbing devices, audio capture and playback etc. As a consequence, the + * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own + * I/O functions). The filename passed to avformat_open_input() often does not + * refer to an actually existing file, but has some special device-specific + * meaning - e.g. for x11grab it is the display name. + * + * To use libavdevice, simply call avdevice_register_all() to register all + * compiled muxers and demuxers. They all use standard libavformat API. + * @} + */ + +#include "libavformat/avformat.h" + +/** + * Return the LIBAVDEVICE_VERSION_INT constant. + */ +unsigned avdevice_version(void); + +/** + * Return the libavdevice build-time configuration. + */ +const char *avdevice_configuration(void); + +/** + * Return the libavdevice license. + */ +const char *avdevice_license(void); + +/** + * Initialize libavdevice and register all the input and output devices. + * @warning This function is not thread safe. + */ +void avdevice_register_all(void); + +#endif /* AVDEVICE_AVDEVICE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/version.h new file mode 100644 index 0000000..4f4ec99 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavdevice/version.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_VERSION_H +#define AVDEVICE_VERSION_H + +/** + * @file + * @ingroup lavd + * Libavdevice version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVDEVICE_VERSION_MAJOR 55 +#define LIBAVDEVICE_VERSION_MINOR 5 +#define LIBAVDEVICE_VERSION_MICRO 100 + +#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT + +#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#endif /* AVDEVICE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/asrc_abuffer.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/asrc_abuffer.h new file mode 100644 index 0000000..aa34461 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/asrc_abuffer.h @@ -0,0 +1,91 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_ASRC_ABUFFER_H +#define AVFILTER_ASRC_ABUFFER_H + +#include "avfilter.h" + +/** + * @file + * memory buffer source for audio + * + * @deprecated use buffersrc.h instead. + */ + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param data pointers to the samples planes + * @param linesize linesizes of each audio buffer plane + * @param nb_samples number of samples per channel + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param planar flag to indicate if audio data is planar or packed + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. 
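+ *
+ * For reference, a call could look like this (48 kHz stereo planar S16 with
+ * 1024 samples per channel; every name except the function itself is
+ * illustrative):
+ * @code
+ * av_asrc_buffer_add_samples(abuffersrc, data, linesize,
+ *                            1024,                 // nb_samples per channel
+ *                            48000,                // sample_rate
+ *                            AV_SAMPLE_FMT_S16P,   // sample_fmt
+ *                            AV_CH_LAYOUT_STEREO,  // ch_layout
+ *                            1,                    // planar
+ *                            pts, 0);
+ * @endcode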
+ */ +attribute_deprecated +int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * This is similar to av_asrc_buffer_add_samples(), but the samples + * are stored in a buffer with known size. + * + * @param abuffersrc audio source buffer context + * @param buf pointer to the samples data, packed is assumed + * @param size the size in bytes of the buffer, it must contain an + * integer number of samples + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc, + uint8_t *buf, int buf_size, + int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param samplesref buffer ref to queue + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc, + AVFilterBufferRef *samplesref, + int av_unused flags); + +#endif /* AVFILTER_ASRC_ABUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avcodec.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avcodec.h new file mode 100644 index 0000000..8bbdad2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avcodec.h @@ -0,0 +1,110 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVCODEC_H +#define AVFILTER_AVCODEC_H + +/** + * @file + * libavcodec/libavfilter gluing utilities + * + * This should be included in an application ONLY if the installed + * libavfilter has been compiled with libavcodec support, otherwise + * symbols defined below will not be available. + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms); + + +/** + * Create and return a picref reference from the data and properties + * contained in frame. 
+ * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms); + +/** + * Create and return a buffer reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms); +#endif + +#if FF_API_FILL_FRAME +/** + * Fill an AVFrame with the information stored in samplesref. + * + * @param frame an already allocated AVFrame + * @param samplesref an audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref); + +/** + * Fill an AVFrame with the information stored in picref. + * + * @param frame an already allocated AVFrame + * @param picref a video buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref); + +/** + * Fill an AVFrame with information stored in ref. + * + * @param frame an already allocated AVFrame + * @param ref a video or audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref); +#endif + +#endif /* AVFILTER_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfilter.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfilter.h new file mode 100644 index 0000000..412b123 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfilter.h @@ -0,0 +1,1520 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi Libavfilter - graph-based frame editing library + * @{ + */ + +#include + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *avfilter_configuration(void); + +/** + * Return the libavfilter license. + */ +const char *avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +#if FF_API_AVFILTERBUFFER +/** + * A reference-counted buffer data type used by the filter system. Filters + * should not store pointers to this structure directly, but instead use the + * AVFilterBufferRef structure below. + */ +typedef struct AVFilterBuffer { + uint8_t *data[8]; ///< buffer data for each plane/channel + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + /** private data to be used by a custom free function */ + void *priv; + /** + * A pointer to the function to deallocate this buffer if the default + * function is not sufficient. This could, for example, add the memory + * back into a memory pool to be reused later without the overhead of + * reallocating it from scratch. + */ + void (*free)(struct AVFilterBuffer *buf); + + int format; ///< media format + int w, h; ///< width and height of the allocated buffer + unsigned refcount; ///< number of references to this buffer +} AVFilterBuffer; + +#define AV_PERM_READ 0x01 ///< can read from the buffer +#define AV_PERM_WRITE 0x02 ///< can write to the buffer +#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer +#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time +#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time +#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes +#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned + +#define AVFILTER_ALIGN 16 //not part of ABI + +/** + * Audio specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, audio specific + * per reference properties must be separated out. 
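+ *
+ * Code holding such a (deprecated) reference reaches these through the audio
+ * pointer, roughly as follows, where process_samples() is a hypothetical
+ * consumer:
+ * @code
+ * if (ref->type == AVMEDIA_TYPE_AUDIO && ref->audio)
+ *     process_samples(ref->data, ref->audio->nb_samples,
+ *                     ref->audio->sample_rate);
+ * @endcode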
+ */ +typedef struct AVFilterBufferRefAudioProps { + uint64_t channel_layout; ///< channel layout of audio buffer + int nb_samples; ///< number of audio samples per channel + int sample_rate; ///< audio buffer sample rate + int channels; ///< number of channels (do not access directly) +} AVFilterBufferRefAudioProps; + +/** + * Video specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, video specific + * per reference properties must be separated out. + */ +typedef struct AVFilterBufferRefVideoProps { + int w; ///< image width + int h; ///< image height + AVRational sample_aspect_ratio; ///< sample aspect ratio + int interlaced; ///< is frame interlaced + int top_field_first; ///< field order + enum AVPictureType pict_type; ///< picture type of the frame + int key_frame; ///< 1 -> keyframe, 0-> not + int qp_table_linesize; ///< qp_table stride + int qp_table_size; ///< qp_table size + int8_t *qp_table; ///< array of Quantization Parameters +} AVFilterBufferRefVideoProps; + +/** + * A reference to an AVFilterBuffer. Since filters can manipulate the origin of + * a buffer to, for example, crop image without any memcpy, the buffer origin + * and dimensions are per-reference properties. Linesize is also useful for + * image flipping, frame to field filters, etc, and so is also per-reference. + * + * TODO: add anything necessary for frame reordering + */ +typedef struct AVFilterBufferRef { + AVFilterBuffer *buf; ///< the buffer that this is a reference to + uint8_t *data[8]; ///< picture/audio data for each plane + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + AVFilterBufferRefVideoProps *video; ///< video buffer specific properties + AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties + + /** + * presentation timestamp. The time unit may change during + * filtering, as it is specified in the link and the filter code + * may need to rescale the PTS accordingly. + */ + int64_t pts; + int64_t pos; ///< byte position in stream, -1 if unknown + + int format; ///< media format + + int perms; ///< permissions, see the AV_PERM_* flags + + enum AVMediaType type; ///< media type of buffer data + + AVDictionary *metadata; ///< dictionary containing metadata key=value tags +} AVFilterBufferRef; + +/** + * Copy properties of src to dst, without copying the actual data + */ +attribute_deprecated +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src); + +/** + * Add a new reference to a buffer. + * + * @param ref an existing reference to the buffer + * @param pmask a bitmask containing the allowable permissions in the new + * reference + * @return a new reference to the buffer with the same properties as the + * old, excluding any permissions denied by pmask + */ +attribute_deprecated +AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); + +/** + * Remove a reference to a buffer. 
If this is the last reference to the + * buffer, the buffer itself is also automatically freed. + * + * @param ref reference to the buffer, may be NULL + * + * @note it is recommended to use avfilter_unref_bufferp() instead of this + * function + */ +attribute_deprecated +void avfilter_unref_buffer(AVFilterBufferRef *ref); + +/** + * Remove a reference to a buffer and set the pointer to NULL. + * If this is the last reference to the buffer, the buffer itself + * is also automatically freed. + * + * @param ref pointer to the buffer reference + */ +attribute_deprecated +void avfilter_unref_bufferp(AVFilterBufferRef **ref); +#endif + +/** + * Get the number of channels of a buffer reference. + */ +attribute_deprecated +int avfilter_ref_get_channels(AVFilterBufferRef *ref); + +#if FF_API_AVFILTERPAD_PUBLIC +/** + * A filter pad used for either input or output. + * + * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. + * users should call avfilter_pad_get_name() and avfilter_pad_get_type() + * to access the name and type fields; there should be no need to access + * any other fields from outside of libavfilter. + */ +struct AVFilterPad { + /** + * Pad name. The name is unique among inputs and among outputs, but an + * input may have the same name as an output. This may be NULL if this + * pad has no need to ever be referenced by name. + */ + const char *name; + + /** + * AVFilterPad type. + */ + enum AVMediaType type; + + /** + * Input pads: + * Minimum required permissions on incoming buffers. Any buffer with + * insufficient permissions will be automatically copied by the filter + * system to a new buffer which provides the needed access permissions. + * + * Output pads: + * Guaranteed permissions on outgoing buffers. Any buffer pushed on the + * link must have at least these permissions; this fact is checked by + * asserts. It can be used to optimize buffer allocation. + */ + attribute_deprecated int min_perms; + + /** + * Input pads: + * Permissions which are not accepted on incoming buffers. Any buffer + * which has any of these permissions set will be automatically copied + * by the filter system to a new buffer which does not have those + * permissions. This can be used to easily disallow buffers with + * AV_PERM_REUSE. + * + * Output pads: + * Permissions which are automatically removed on outgoing buffers. It + * can be used to optimize buffer allocation. + */ + attribute_deprecated int rej_perms; + + /** + * @deprecated unused + */ + int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); + + /** + * Callback function to get a video buffer. If NULL, the filter system will + * use ff_default_get_video_buffer(). + * + * Input video pads only. + */ + AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); + + /** + * Callback function to get an audio buffer. If NULL, the filter system will + * use ff_default_get_audio_buffer(). + * + * Input audio pads only. + */ + AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); + + /** + * @deprecated unused + */ + int (*end_frame)(AVFilterLink *link); + + /** + * @deprecated unused + */ + int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); + + /** + * Filtering callback. This is where a filter receives a frame with + * audio/video data and should do its processing. + * + * Input pads only. + * + * @return >= 0 on success, a negative AVERROR on error. 
This function + * must ensure that frame is properly unreferenced on error if it + * hasn't been passed on to another filter. + */ + int (*filter_frame)(AVFilterLink *link, AVFrame *frame); + + /** + * Frame poll callback. This returns the number of immediately available + * samples. It should return a positive value if the next request_frame() + * is guaranteed to return one frame (with no delay). + * + * Defaults to just calling the source poll_frame() method. + * + * Output pads only. + */ + int (*poll_frame)(AVFilterLink *link); + + /** + * Frame request callback. A call to this should result in at least one + * frame being output over the given link. This should return zero on + * success, and another value on error. + * See ff_request_frame() for the error codes with a specific + * meaning. + * + * Output pads only. + */ + int (*request_frame)(AVFilterLink *link); + + /** + * Link configuration callback. + * + * For output pads, this should set the following link properties: + * video: width, height, sample_aspect_ratio, time_base + * audio: sample_rate. + * + * This should NOT set properties such as format, channel_layout, etc which + * are negotiated between filters by the filter system using the + * query_formats() callback before this function is called. + * + * For input pads, this should check the properties of the link, and update + * the filter's internal state as necessary. + * + * For both input and output pads, this should return zero on success, + * and another value on error. + */ + int (*config_props)(AVFilterLink *link); + + /** + * The filter expects a fifo to be inserted on its input link, + * typically because it has a delay. + * + * input pads only. + */ + int needs_fifo; + + int needs_writable; +}; +#endif + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. + */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. 
When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. 
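+ *
+ * A skeletal example of such a callback; MyFilterContext and its fields are
+ * hypothetical, and ctx->priv is the priv_size block allocated by
+ * libavfilter:
+ * @code
+ * static int my_init(AVFilterContext *ctx)
+ * {
+ *     MyFilterContext *s = ctx->priv;   // options have already been applied
+ *     if (s->window_size <= 0)
+ *         return AVERROR(EINVAL);
+ *     return 0;
+ * }
+ * @endcode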
+ * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. + * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). + * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". + */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + /** + * Used by the filter registration system. Must not be touched by any other + * code. + */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs +#endif + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs +#endif + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialzed, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. + */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. 
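+ *
+ * For example, an output pad's config_props() callback might simply carry the
+ * input link's timebase and video dimensions over to the output link (a
+ * minimal sketch for a pass-through video filter; "config_output" is a
+ * hypothetical name):
+ * @code
+ * static int config_output(AVFilterLink *outlink)
+ * {
+ *     AVFilterLink *inlink = outlink->src->inputs[0];
+ *
+ *     outlink->time_base           = inlink->time_base;
+ *     outlink->w                   = inlink->w;
+ *     outlink->h                   = inlink->h;
+ *     outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+ *     return 0;
+ * }
+ * @endcode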
+ */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. + */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + struct AVFilterPool *pool; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown; + * if left to 0/0, will be automatically be copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. + */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * The buffer reference currently being received across the link by the + * destination filter. This is used internally by the filter system to + * allow automatic copying of buffers which do not have sufficient + * permissions for the destination. This should not be accessed directly + * by the filters. + */ + AVFilterBufferRef *cur_buf_copy; + + /** + * True if the link is closed. 
+ * If set, all attempts of start_frame, filter_frame or request_frame + * will fail with AVERROR_EOF, and if necessary the reference will be + * destroyed. + * If request_frame returns AVERROR_EOF, this flag is set on the + * corresponding link. + * It can also be set by either the source or the destination + * filter. + */ + int closed; + + /** + * Number of channels. + */ + int channels; + + /** + * True if a frame is being requested on the link. + * Used internally by the framework. + */ + unsigned frame_requested; + + /** + * Link processing flags. + */ + unsigned flags; + + /** + * Number of past frames sent through the link. + */ + int64_t frame_count; +}; + +/** + * Link two filters together. + * + * @param src the source filter + * @param srcpad index of the output pad on the source filter + * @param dst the destination filter + * @param dstpad index of the input pad on the destination filter + * @return zero on success + */ +int avfilter_link(AVFilterContext *src, unsigned srcpad, + AVFilterContext *dst, unsigned dstpad); + +/** + * Free the link in *link, and set its pointer to NULL. + */ +void avfilter_link_free(AVFilterLink **link); + +/** + * Get the number of channels of a link. + */ +int avfilter_link_get_channels(AVFilterLink *link); + +/** + * Set the closed field of a link. + */ +void avfilter_link_set_closed(AVFilterLink *link, int closed); + +/** + * Negotiate the media format, dimensions, etc of all inputs to a filter. + * + * @param filter the filter to negotiate the properties for its inputs + * @return zero on successful negotiation + */ +int avfilter_config_links(AVFilterContext *filter); + +#if FF_API_AVFILTERBUFFER +/** + * Create a buffer reference wrapped around an already allocated image + * buffer. + * + * @param data pointers to the planes of the image to reference + * @param linesize linesizes for the planes of the image to reference + * @param perms the required access permissions + * @param w the width of the image specified by the data and linesize arrays + * @param h the height of the image specified by the data and linesize arrays + * @param format the pixel format of the image specified by the data and linesize arrays + */ +attribute_deprecated +AVFilterBufferRef * +avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms, + int w, int h, enum AVPixelFormat format); + +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer. + * + * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version + * that can handle unknown channel layouts. + * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channel_layout the channel layout of the buffer + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + uint64_t channel_layout); +/** + * Create an audio buffer reference wrapped around an already + * allocated samples buffer.
+ * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channels the number of channels of the buffer + * @param channel_layout the channel layout of the buffer, + * must be either 0 or consistent with channels + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout); + +#endif + + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when it's fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** Initialize the filter system. Register all builtin filters. */ +void avfilter_register_all(void); + +#if FF_API_OLD_FILTER_REGISTER +/** Uninitialize the filter system. Unregister all filters. */ +attribute_deprecated +void avfilter_uninit(void); +#endif + +/** + * Register a filter. This is only needed if you plan to use + * avfilter_get_by_name later to look up the AVFilter structure by name. A + * filter can still be instantiated with avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +int avfilter_register(AVFilter *filter); + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +AVFilter *avfilter_get_by_name(const char *name); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +const AVFilter *avfilter_next(const AVFilter *prev); + +#if FF_API_OLD_FILTER_REGISTER +/** + * If filter is NULL, returns a pointer to the first registered filter pointer, + * if filter is non-NULL, returns the next pointer after filter. + * If the returned pointer points to NULL, the last registered filter + * was already reached. + * @deprecated use avfilter_next() + */ +attribute_deprecated +AVFilter **av_filter_next(AVFilter **filter); +#endif + +#if FF_API_AVFILTER_OPEN +/** + * Create a filter instance. + * + * @param filter_ctx put here a pointer to the created filter context + * on success, NULL on failure + * @param filter the filter to create an instance of + * @param inst_name Name to give to the new instance. Can be NULL for none. + * @return >= 0 in case of success, a negative error code otherwise + * @deprecated use avfilter_graph_alloc_filter() instead + */ +attribute_deprecated +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name); +#endif + + +#if FF_API_AVFILTER_INIT_FILTER +/** + * Initialize a filter.
+ * + * @param filter the filter to initialize + * @param args A string of parameters to use when initializing the filter. + * The format and meaning of this string varies by filter. + * @param opaque Any extra non-string data needed by the filter. The meaning + * of this parameter varies by filter. + * @return zero on success + */ +attribute_deprecated +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque); +#endif + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. + * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. + */ +int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. + * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +#if FF_API_AVFILTERBUFFER +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + +/** + * Copy the frame properties and data pointers of src to dst, without copying + * the actual data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); +#endif + +/** + * @return AVClass for AVFilterContext. + * + * @see av_opt_find(). 
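+ *
+ * For example, the returned class can be used together with av_opt_find()
+ * (from libavutil/opt.h) to query a generic filter option without allocating
+ * a context (a sketch; "enable" is just one of the generic options):
+ * @code
+ * const AVClass  *cls = avfilter_get_class();
+ * const AVOption *opt = av_opt_find(&cls, "enable", NULL, 0,
+ *                                   AV_OPT_SEARCH_FAKE_OBJ);
+ * @endcode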
+ */ +const AVClass *avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. + * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; +#if FF_API_FOO_COUNT + attribute_deprecated + unsigned filter_count_unused; +#endif + AVFilterContext **filters; +#if !FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters + char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#if FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. + */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. 
+ * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + */ +AVFilterGraph *avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * avfilter_graph_get_filter()) on success or NULL or failure. + */ +AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance with name name from graph. + * + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name); + +#if FF_API_AVFILTER_OPEN +/** + * Add an existing filter instance to a filter graph. + * + * @param graphctx the filter graph + * @param filter the filter to be added + * + * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a + * filter graph + */ +attribute_deprecated +int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter); +#endif + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. + */ +void avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. 
+ * This struct specifies, for each unconnected pad contained in the graph, the + * filter context and the pad index required for establishing a link. + */ +typedef struct AVFilterInOut { + /** unique name for this input/output in the list */ + char *name; + + /** filter context associated to this input/output */ + AVFilterContext *filter_ctx; + + /** index of the filt_ctx pad to use for linking */ + int pad_idx; + + /** next input/output in the list, NULL if this is the last */ + struct AVFilterInOut *next; +} AVFilterInOut; + +/** + * Allocate a single AVFilterInOut entry. + * Must be freed with avfilter_inout_free(). + * @return allocated AVFilterInOut on success, NULL on failure. + */ +AVFilterInOut *avfilter_inout_alloc(void); + +/** + * Free the supplied list of AVFilterInOut and set *inout to NULL. + * If *inout is NULL, do nothing. + */ +void avfilter_inout_free(AVFilterInOut **inout); + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE +/** + * Add a graph described by a string to a graph. + * + * @note The caller must provide the lists of inputs and outputs, + * which therefore must be known before calling the function. + * + * @note The inputs parameter describes inputs of the already existing + * part of the graph; i.e. from the point of view of the newly created + * part, they are outputs. Similarly the outputs parameter describes + * outputs of the already existing filters, which are provided as + * inputs to the parsed filters. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs linked list to the inputs of the graph + * @param outputs linked list to the outputs of the graph + * @return zero on success, a negative AVERROR code on error + */ +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut *inputs, AVFilterInOut *outputs, + void *log_ctx); +#else +/** + * Add a graph described by a string to a graph. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free(). + * @return non negative on success, a negative AVERROR code on error + * @deprecated Use avfilter_graph_parse_ptr() instead. + */ +attribute_deprecated +int avfilter_graph_parse(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); +#endif + +/** + * Add a graph described by a string to a graph. + * + * @param graph the filter graph where to link the parsed graph context + * @param filters string to be parsed + * @param inputs pointer to a linked list to the inputs of the graph, may be NULL. + * If non-NULL, *inputs is updated to contain the list of open inputs + * after the parsing, should be freed with avfilter_inout_free(). + * @param outputs pointer to a linked list to the outputs of the graph, may be NULL. + * If non-NULL, *outputs is updated to contain the list of open outputs + * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error + */ +int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, AVFilterInOut **outputs, + void *log_ctx); + +/** + * Add a graph described by a string to a graph. + * + * @param[in] graph the filter graph where to link the parsed graph context + * @param[in] filters string to be parsed + * @param[out] inputs a linked list of all free (unlinked) inputs of the + * parsed graph will be returned here. It is to be freed + * by the caller using avfilter_inout_free(). + * @param[out] outputs a linked list of all free (unlinked) outputs of the + * parsed graph will be returned here. It is to be freed by the + * caller using avfilter_inout_free(). + * @return zero on success, a negative AVERROR code on error + * + * @note This function returns the inputs and outputs that are left + * unlinked after parsing the graph and the caller then deals with + * them. + * @note This function makes no reference whatsoever to already + * existing parts of the graph and the inputs parameter will on return + * contain inputs of the newly parsed part of the graph. Analogously + * the outputs parameter will contain outputs of the newly created + * filters. + */ +int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters, + AVFilterInOut **inputs, + AVFilterInOut **outputs); + +/** + * Send a command to one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_len where the filter(s) can return a response. + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ +int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** + * Queue a command for one or more filter instances. + * + * @param graph the filter graph + * @param target the filter(s) to which the command should be sent + * "all" sends to all filters + * otherwise it can be a filter or filter instance name + * which will send the command to all matching filters. + * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param ts time at which the command should be sent to the filter + * + * @note As this executes commands after this function returns, no return code + * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported. + */ +int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts); + + +/** + * Dump a graph into a human-readable string representation. + * + * @param graph the graph to dump + * @param options formatting options; currently ignored + * @return a string, or NULL in case of memory allocation failure; + * the string must be freed using av_free + */ +char *avfilter_graph_dump(AVFilterGraph *graph, const char *options); + +/** + * Request a frame on the oldest sink link. + * + * If the request returns AVERROR_EOF, try the next.
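+ *
+ * For example, a loop draining a finished graph through a buffersink might
+ * look like this (a sketch; "graph" and "sink_ctx" are assumed to be a
+ * configured AVFilterGraph and a buffersink instance inside it):
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * while (avfilter_graph_request_oldest(graph) >= 0) {
+ *     while (av_buffersink_get_frame(sink_ctx, frame) >= 0) {
+ *         // ... consume the filtered frame ...
+ *         av_frame_unref(frame);
+ *     }
+ * }
+ * av_frame_free(&frame);
+ * @endcode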
+ * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. + * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfiltergraph.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfiltergraph.h new file mode 100644 index 0000000..b31d581 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/avfiltergraph.h @@ -0,0 +1,28 @@ +/* + * Filter graphs + * copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTERGRAPH_H +#define AVFILTER_AVFILTERGRAPH_H + +#include "avfilter.h" +#include "libavutil/log.h" + +#endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersink.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersink.h new file mode 100644 index 0000000..ce96d08 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersink.h @@ -0,0 +1,186 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Get an audio/video buffer data from buffer_sink and put it in bufref. + * + * This function works with both audio and video buffer sinks. 
+ * + * @param buffer_sink pointer to a buffersink or abuffersink context + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +attribute_deprecated +int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink, + AVFilterBufferRef **bufref, int flags); + +/** + * Get the number of immediately available frames. + */ +attribute_deprecated +int av_buffersink_poll_frame(AVFilterContext *ctx); + +/** + * Get a buffer with filtered data from sink and put it in buf. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. buf + * must be freed by the caller using avfilter_unref_buffer(). + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +attribute_deprecated +int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf); + +/** + * Same as av_buffersink_read, but with the ability to specify the number of + * samples read. This function is less efficient than av_buffersink_read(), + * because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param buf pointer to the buffer will be written here if buf is non-NULL. buf + * must be freed by the caller using avfilter_unref_buffer(). buf + * will contain exactly nb_samples audio samples, except at the end + * of stream, when it can contain less than nb_samples. + * Buf may also be NULL to query whether a buffer is ready to be + * output. + * + * @warning do not mix this function with av_buffersink_read(). Use only one or + * the other with a single sink, not both. + */ +attribute_deprecated +int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf, + int nb_samples); +#endif + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a buffersink or abuffersink filter context. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags + * + * @return >= 0 in for success, a negative AVERROR code for failure. + */ +int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags); + +/** + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer + * reference, but not remove it from the buffer. This is useful if you + * need only to read a video/samples buffer, without to fetch it. + */ +#define AV_BUFFERSINK_FLAG_PEEK 1 + +/** + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input. + * If a frame is already buffered, it is read (and removed from the buffer), + * but if no frame is present, return AVERROR(EAGAIN). + */ +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2 + +/** + * Struct to use for initializing a buffersink context. + */ +typedef struct { + const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE +} AVBufferSinkParams; + +/** + * Create an AVBufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVBufferSinkParams *av_buffersink_params_alloc(void); + +/** + * Struct to use for initializing an abuffersink context. 
+ */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * Get the frame rate of the input. + */ +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @warning do not mix this function with av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersrc.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersrc.h new file mode 100644 index 0000000..89613e1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/buffersrc.h @@ -0,0 +1,148 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * Memory buffer source API. + */ + +#include "libavcodec/avcodec.h" +#include "avfilter.h" + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + +#if FF_API_AVFILTERBUFFER + /** + * Ignored + */ + AV_BUFFERSRC_FLAG_NO_COPY = 2, +#endif + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Add buffer data in picref to buffer_src. + * + * @param buffer_src pointer to a buffer source context + * @param picref a buffer reference, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_ref(AVFilterContext *buffer_src, + AVFilterBufferRef *picref, int flags); + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +#if FF_API_AVFILTERBUFFER +/** + * Add a buffer to the filtergraph s. + * + * @param buf buffer containing frame data to be passed down the filtergraph. + * This function will take ownership of buf, the user must not free it. + * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. + * + * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() + */ +attribute_deprecated +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +#endif + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and av_buffersrc_write_frame() is + * that av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. 
+ * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controled + * using the flags. + * + * If this function returns an error, the input frame is not touched. + * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/version.h new file mode 100644 index 0000000..541b869 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavfilter/version.h @@ -0,0 +1,89 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFILTER_VERSION_MAJOR 3 +#define LIBAVFILTER_VERSION_MINOR 90 +#define LIBAVFILTER_VERSION_MICRO 100 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
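+ *
+ * Applications that need to stay compatible with several libavfilter
+ * releases typically branch on the version macros above instead, for
+ * example (a sketch):
+ * @code
+ * #if LIBAVFILTER_VERSION_INT >= AV_VERSION_INT(3, 90, 100)
+ *     // code for this release and newer
+ * #else
+ *     // fallback for older releases
+ * #endif
+ * @endcode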
+ */ + +#ifndef FF_API_AVFILTERPAD_PUBLIC +#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FOO_COUNT +#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FILL_FRAME +#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_BUFFERSRC_BUFFER +#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTERBUFFER +#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_OPTS +#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_ACONVERT_FILTER +#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_OPEN +#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_INIT_FILTER +#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_REGISTER +#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_GRAPH_PARSE +#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_DRAWTEXT_OLD_TIMELINE +#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avformat.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avformat.h new file mode 100644 index 0000000..4e5683c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avformat.h @@ -0,0 +1,2239 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf I/O and Muxing/Demuxing Library + * @{ + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. 
You can iterate over all + * registered input/output formats using the av_iformat_next() / + * av_oformat_next() functions. The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with av_malloc(). To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). + * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * Lavf allows to configure muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL or filename, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. 
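+ *
+ * Continuing the snippet above, a typical follow-up might be (a sketch):
+ * @code
+ * if (avformat_find_stream_info(s, NULL) < 0)
+ *     abort();
+ * av_dump_format(s, 0, url, 0);
+ * int video_stream_index = av_find_best_stream(s, AVMEDIA_TYPE_VIDEO,
+ *                                              -1, -1, NULL, 0);
+ * @endcode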
+ * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. + * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * av_dict_set(&options, "video_size", "640x480", 0); + * av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. This packet may be passed straight into the libavcodec + * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or + * avcodec_decode_subtitle2() if the caller wishes to decode the data. + * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with av_free_packet() when it is no + * longer needed. 
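/*
 * A sketch of the read loop described above: pull packets with
 * av_read_frame(), note which stream each one belongs to, and release it
 * with av_free_packet(). The context "s" is assumed to have already been
 * opened with avformat_open_input().
 */
#include <stdio.h>
#include <libavformat/avformat.h>

static void drain_packets(AVFormatContext *s)
{
    AVPacket pkt;

    while (av_read_frame(s, &pkt) >= 0) {       /* returns < 0 on error or end of file */
        printf("stream %d: pts %lld, %d bytes\n",
               pkt.stream_index, (long long)pkt.pts, pkt.size);
        av_free_packet(&pkt);                   /* required whether or not pkt.buf is set */
    }
}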
+ * + * @section lavf_decoding_seek Seeking + * @} + * + * @defgroup lavf_encoding Muxing + * @{ + * @} + * + * @defgroup lavf_io I/O Read/Write + * @{ + * @} + * + * @defgroup lavf_codec Demuxers + * @{ + * @defgroup lavf_codec_native Native Demuxers + * @{ + * @} + * @defgroup lavf_codec_wrappers External library wrappers + * @{ + * @} + * @} + * @defgroup lavf_protos I/O Protocols + * @{ + * @} + * @defgroup lavf_internal Internal + * @{ + * @} + * @} + * + */ + +#include +#include /* FILE */ +#include "libavcodec/avcodec.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "avio.h" +#include "libavformat/version.h" + +struct AVFormatContext; + + +/** + * @defgroup metadata_api Public Metadata API + * @{ + * @ingroup libavf + * The metadata API allows libavformat to export metadata tags to a client + * application when demuxing. Conversely it allows a client application to + * set metadata when muxing. + * + * Metadata is exported or set as pairs of key/value strings in the 'metadata' + * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs + * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, + * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata + * exported by demuxers isn't checked to be valid UTF-8 in most cases. + * + * Important concepts to keep in mind: + * - Keys are unique; there can never be 2 tags with the same key. This is + * also meant semantically, i.e., a demuxer should not knowingly produce + * several keys that are literally different but semantically identical. + * E.g., key=Author5, key=Author6. In this example, all authors must be + * placed in the same tag. + * - Metadata is flat, not hierarchical; there are no subtags. If you + * want to store, e.g., the email address of the child of producer Alice + * and actor Bob, that could have key=alice_and_bobs_childs_email_address. + * - Several modifiers can be applied to the tag name. This is done by + * appending a dash character ('-') and the modifier name in the order + * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. + * - language -- a tag whose value is localized for a particular language + * is appended with the ISO 639-2/B 3-letter language code. + * For example: Author-ger=Michael, Author-eng=Mike + * The original/default language is in the unqualified "Author" tag. + * A demuxer should set a default if it sets any translated tag. + * - sorting -- a modified version of a tag that should be used for + * sorting will have '-sort' appended. E.g. artist="The Beatles", + * artist-sort="Beatles, The". + * + * - Demuxers attempt to export metadata in a generic format, however tags + * with no generic equivalents are left as they are stored in the container. + * Follows a list of generic tag names: + * + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. 
+ filename -- original name of the file. + genre -- . + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. + * + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct AVFrac { + int64_t val, num, den; +} AVFrac; + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ +#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for + raw picture data. */ +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. 
Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ +#if LIBAVFORMAT_VERSION_MAJOR <= 54 +#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks +#else +#define AVFMT_TS_NONSTRICT 0x20000 +#endif + /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in av_write_frame and + av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. 
+ */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. + * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. Only used in raw format right + * now. 'avformat_new_stream' should be called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. 
+ * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. + * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. + */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The single packet associated with it will be returned + * among the first few packets read from the file unless seeking takes place. 
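/*
 * A sketch of using the attached-picture disposition described here: scan
 * the streams of an already-opened context for cover art and dump the raw
 * image payload (typically JPEG or PNG) to "cover.bin", a hypothetical
 * output name chosen only for illustration.
 */
#include <stdio.h>
#include <libavformat/avformat.h>

static int save_cover_art(AVFormatContext *s)
{
    unsigned int i;

    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            AVPacket pic = s->streams[i]->attached_pic;   /* filled by the demuxer */
            FILE *f = fopen("cover.bin", "wb");
            if (!f)
                return -1;
            fwrite(pic.data, 1, pic.size, f);
            fclose(f);
            return 0;
        }
    }
    return -1;                                            /* no attached picture found */
}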
+ * It can also be accessed at any time in AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. + */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; + /** + * Codec context associated with this stream. Allocated and freed by + * libavformat. + * + * - decoding: The demuxer exports codec information stored in the headers + * here. + * - encoding: The user sets codec information, the muxer writes it to the + * output. Mandatory fields as specified in AVCodecContext + * documentation must be set even if this AVCodecContext is + * not actually used for encoding. + */ + AVCodecContext *codec; + void *priv_data; + + /** + * encoding: pts generation when outputting stream + */ + struct AVFrac pts; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: set by libavformat in avformat_write_header. The muxer may use the + * user-provided value of @ref AVCodecContext.time_base "codec->time_base" + * as a hint. + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. + + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Stream information used internally by av_find_stream_info() + */ +#define MAX_STD_TIMEBASES (60*12+6) + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t reference_dts; + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ +#define MAX_PROBE_PACKETS 2500 + int probe_packets; + + /** + * Number of frames that have been demuxed during av_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Real base framerate of the stream. + * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + * + * Code outside avformat should access this field using: + * av_stream_get/set_r_frame_rate(stream) + */ + AVRational r_frame_rate; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. + * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. 
+ */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. + */ + int pts_wrap_behavior; + +} AVStream; + +AVRational av_stream_get_r_frame_rate(const AVStream *s); +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. + */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. + */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + */ +typedef struct AVFormatContext { + /** + * A class for logging and AVOptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * Can only be iformat or oformat, not both at the same time. + * + * decoding: set by avformat_open_input(). + * encoding: set by the user. + */ + struct AVInputFormat *iformat; + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. 
+ */ + void *priv_data; + + /** + * I/O context. + * + * decoding: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * encoding: set by the user. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ + + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * decoding: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * encoding: streams are created by the user before avformat_write_header(). + */ + unsigned int nb_streams; + AVStream **streams; + + char filename[1024]; /**< input or output filename */ + + /** + * Decoding: position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + */ + int64_t duration; + + /** + * Decoding: total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int bit_rate; + + unsigned int packet_size; + int max_delay; + + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. + + /** + * decoding: size of data to probe; encoding: unused. + */ + unsigned int probesize; + + /** + * decoding: maximum time (in AV_TIME_BASE units) during which the input should + * be analyzed in avformat_find_stream_info(). 
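/*
 * A sketch of raising the probing limits described above through the
 * AVOptions mechanism rather than by touching the fields directly, using
 * the generic "probesize" and "analyzeduration" format options; the values
 * shown are arbitrary examples.
 */
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

static int open_with_long_probe(AVFormatContext **s, const char *url)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "probesize", "5000000", 0);        /* bytes to read while probing */
    av_dict_set(&opts, "analyzeduration", "10000000", 0); /* in AV_TIME_BASE (microsecond) units */
    ret = avformat_open_input(s, url, NULL, &opts);
    av_dict_free(&opts);                                  /* unrecognized options come back here */
    return ret;
}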
+ */ + int max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * muxing : unused + * demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * muxing : set by user + * demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the unix epoch (00:00 1st January 1970). That is, pts=0 + * in the stream was captured at this real world time. + * - encoding: Set by user. + * - decoding: Unused. + */ + int64_t start_time_realtime; + + /** + * decoding: number of frames used to probe fps + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * decoding: set by the user before avformat_open_input(). + * encoding: set by the user before avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
+ * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int use_wallclock_as_timestamps; + + /** + * Avoid negative timestamps during muxing. + * 0 -> allow negative timestamps + * 1 -> avoid negative timestamps + * -1 -> choose automatically (default) + * Note, this only works when interleave_packet_per_dts is in use. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int avoid_negative_ts; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user via AVOptions (NO direct access) + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + unsigned int skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + */ + int probe_score; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. + */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. 
+ * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. + * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + */ + AVCodec *subtitle_codec; +} AVFormatContext; + +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. + * + * @see av_register_input_format() + * @see av_register_output_format() + */ +void av_register_all(void); + +void av_register_input_format(AVInputFormat *format); +void av_register_output_format(AVOutputFormat *format); + +/** + * Do global initialization of network components. This is optional, + * but recommended, since it avoids the overhead of implicitly + * doing the setup for each session. + * + * Calling this function will become mandatory if using network + * protocols at some major version bump. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. + */ +int avformat_network_deinit(void); + +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. 
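/*
 * A sketch of the av_iformat_next() iteration idiom described above: walk
 * the list of registered demuxers (after av_register_all()) and print
 * their short names.
 */
#include <stdio.h>
#include <libavformat/avformat.h>

static void list_demuxers(void)
{
    AVInputFormat *f = NULL;

    av_register_all();
    while ((f = av_iformat_next(f)))    /* passing NULL starts from the first format */
        printf("%s\n", f->name);
}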
+ */ +AVInputFormat *av_iformat_next(AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +AVOutputFormat *av_oformat_next(AVOutputFormat *f); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +#if FF_API_ALLOC_OUTPUT_CONTEXT +/** + * @deprecated deprecated in favor of avformat_alloc_output_context2() + */ +attribute_deprecated +AVFormatContext *avformat_alloc_output_context(const char *format, + AVOutputFormat *oformat, + const char *filename); +#endif + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. + * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + */ +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. 
+ * @param score_max A probe score larger that this is required to accept a + * detection, the variable is set to the actual detection + * score afterwards. + * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended + * to retry with a larger probe buffer. + */ +AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_ret The score of the best detection. + */ +AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); + +/** + * Probe a bytestream to determine the input format. Each time a probe returns + * with a score that is too low, the probe buffer size is increased and another + * attempt is made. When the maximum probe size is reached, the input format + * with the highest score is returned. + * + * @param pb the bytestream to probe + * @param fmt the input format is put here + * @param filename the filename of the stream + * @param logctx the log context + * @param offset the offset within the bytestream to probe from + * @param max_probe_size the maximum probe buffer size (zero for default) + * @return the score in case of success, a negative value corresponding to an + * the maximal score is AVPROBE_SCORE_MAX + * AVERROR code otherwise + */ +int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, + const char *filename, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Like av_probe_input_buffer2() but returns 0 on success + */ +int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, + const char *filename, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Open an input stream and read the header. The codecs are not opened. + * The stream must be closed with avformat_close_input(). + * + * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). + * May be a pointer to NULL, in which case an AVFormatContext is allocated by this + * function and written into ps. + * Note that a user-supplied AVFormatContext will be freed on failure. + * @param filename Name of the stream to open. + * @param fmt If non-NULL, this parameter forces a specific input format. + * Otherwise the format is autodetected. + * @param options A dictionary filled with AVFormatContext and demuxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. + */ +int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +#if FF_API_FORMAT_PARAMETERS +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @return >=0 if OK, AVERROR_xxx on error + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. 
+ * + * @deprecated use avformat_find_stream_info. + */ +attribute_deprecated +int av_find_stream_info(AVFormatContext *ic); +#endif + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. + */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +#if FF_API_READ_PACKET +/** + * @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw + * unprocessed packets + * + * Read a transport packet from a media file. + * + * This function is obsolete and should never be used. + * Use av_read_frame() instead. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +attribute_deprecated +int av_read_packet(AVFormatContext *s, AVPacket *pkt); +#endif + +/** + * Return the next frame of a stream. 
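/*
 * A sketch of the stream-selection step that usually precedes the
 * av_read_frame() loop: use av_find_best_stream(), documented above, to
 * pick the most suitable video stream and its default decoder.
 */
#include <libavformat/avformat.h>

static int pick_video_stream(AVFormatContext *s, AVCodec **decoder)
{
    return av_find_best_stream(s, AVMEDIA_TYPE_VIDEO,
                               -1 /* any stream */, -1 /* no related stream */,
                               decoder, 0);
    /* >= 0: stream index; otherwise AVERROR_STREAM_NOT_FOUND or
     * AVERROR_DECODER_NOT_FOUND */
}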
+ * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. + * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * av_free_packet when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. + */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). 
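/*
 * A sketch of the time-based seek described above for av_seek_frame():
 * jump to the last keyframe at or before t = 60 seconds on stream 0
 * (assumed to exist here), converting seconds to that stream's time_base
 * first.
 */
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>

static int seek_to_minute(AVFormatContext *s)
{
    int stream_index = 0;
    int64_t ts = av_rescale_q(60 * AV_TIME_BASE,       /* 60 s in AV_TIME_BASE units */
                              AV_TIME_BASE_Q,
                              s->streams[stream_index]->time_base);
    return av_seek_frame(s, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}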
+ * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +#if FF_API_CLOSE_INPUT_FILE +/** + * @deprecated use avformat_close_input() + * Close a media file (but not its codecs). + * + * @param s media file handle + */ +attribute_deprecated +void av_close_input_file(AVFormatContext *s); +#endif + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. + */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#if FF_API_NEW_STREAM +/** + * Add a new stream to a media file. + * + * Can only be called in the read_header() function. If the flag + * AVFMTCTX_NOHEADER is in the format context, then new streams + * can be added in read_packet too. + * + * @param s media file handle + * @param id file-format-dependent stream ID + */ +attribute_deprecated +AVStream *av_new_stream(AVFormatContext *s, int id); +#endif + +#if FF_API_SET_PTS_INFO +/** + * @deprecated this function is not supposed to be called outside of lavf + */ +attribute_deprecated +void av_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); +#endif + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number + +/** + * @addtogroup lavf_encoding + * @{ + */ +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. + */ +int avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * The packet shall contain one audio or video frame. + * The packet must be correctly interleaved according to the container + * specification, if not then av_interleaved_write_frame must be used. + * + * @param s media file handle + * @param pkt The packet, which contains the stream_index, buf/buf_size, + * dts/pts, ... + * This can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, + * for muxers that buffer up data internally before writing it + * to the output. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + */ +int av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * The packet must contain one audio or video frame. + * If the packets are already correctly interleaved, the application should + * call av_write_frame() instead as it is slightly faster. It is also important + * to keep in mind that completely non-interleaved input will need huge amounts + * of memory to interleave with this, so it is preferable to interleave at the + * demuxer level. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. 
pkt->buf must be set + * to a valid AVBufferRef describing the packet data. Libavformat takes + * ownership of this reference and will unref it when it sees fit. The caller + * must not access the data through this reference after this function returns. + * This can be NULL (at any time, not just at the end), to flush the + * interleaving queues. + * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the + * index of the corresponding stream in @ref AVFormatContext.streams + * "s.streams". + * It is very strongly recommended that timing information (@ref AVPacket.pts + * "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to + * correct values. + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. + */ +enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, + const char *filename, const char *mime_type, + enum AVMediaType type); + +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. + * @param s media file handle + * @param stream stream in the media file + * @param[out] dts DTS of the last packet output for the stream, in stream + * time_base units + * @param[out] wall absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. + */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. 
+ * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, + AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. + * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. 
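+ *
+ * For example (sketch; the URL is illustrative):
+ * @code
+ * char proto[16], host[128], path[256];
+ * int port;
+ *
+ * av_url_split(proto, sizeof(proto), NULL, 0,
+ *              host, sizeof(host), &port,
+ *              path, sizeof(path),
+ *              "rtsp://media.example.com:554/stream1");
+ * // proto = "rtsp", host = "media.example.com", port = 554, path = "/stream1"
+ * @endcode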
+ * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +void av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. + * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param extensions a comma-separated list of filename extensions + */ +int av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. 
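+ *
+ * When the media type behind a tag is not known in advance, the video and
+ * audio tables can be searched together (sketch; here tag would be read from
+ * the file rather than hardcoded as above):
+ * @code
+ * const struct AVCodecTag *tables[] = { avformat_get_riff_video_tags(),
+ *                                       avformat_get_riff_audio_tags(), 0 };
+ * enum AVCodecID id = av_codec_get_id(tables, tag);
+ * @endcode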
+ */ +const struct AVCodecTag *avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. + * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avio.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avio.h new file mode 100644 index 0000000..4f4ac3c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/avio.h @@ -0,0 +1,481 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include + +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavformat/version.h" + + +#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ + +/** + * Callback for checking whether to abort blocking functions. + * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. + */ + const AVClass *av_class; + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int must_flush; /**< true if the next seek should flush */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. 
+ */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; +} AVIOContext; + +/* unbuffered I/O */ + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int avio_check(const char *url, int flags); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with av_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with av_malloc() and friends. + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. + */ +AVIOContext *avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +void avio_w8(AVIOContext *s, int b); +void avio_write(AVIOContext *s, const unsigned char *buf, int size); +void avio_wl64(AVIOContext *s, uint64_t val); +void avio_wb64(AVIOContext *s, uint64_t val); +void avio_wl32(AVIOContext *s, unsigned int val); +void avio_wb32(AVIOContext *s, unsigned int val); +void avio_wl24(AVIOContext *s, unsigned int val); +void avio_wb24(AVIOContext *s, unsigned int val); +void avio_wl16(AVIOContext *s, unsigned int val); +void avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. 
+ */ +int avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @return number of bytes written. + */ +int avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Passing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Oring this flag as into the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int url_feof(AVIOContext *s); + +/** @warning currently size is limited */ +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data to the output s. + * + * Force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + */ +void avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. + * @return number of bytes read or AVERROR + */ +int avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int avio_r8 (AVIOContext *s); +unsigned int avio_rl16(AVIOContext *s); +unsigned int avio_rl24(AVIOContext *s); +unsigned int avio_rl32(AVIOContext *s); +uint64_t avio_rl64(AVIOContext *s); +unsigned int avio_rb16(AVIOContext *s); +unsigned int avio_rb24(AVIOContext *s); +unsigned int avio_rb32(AVIOContext *s); +uint64_t avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. + * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. 
+ * @return number of bytes read (is always <= maxlen) + */ +int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to avio_open must be one of the following + * constants, optionally ORed with other flags. + * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_closep + */ +int avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by avio_open(). 
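+ *
+ * A typical open/read/close sequence (sketch; the URL is illustrative and
+ * error handling is trimmed):
+ * @code
+ * AVIOContext *in = NULL;
+ * unsigned char buf[4096];
+ * int n;
+ *
+ * if (avio_open(&in, "http://example.com/file.bin", AVIO_FLAG_READ) >= 0) {
+ *     while ((n = avio_read(in, buf, sizeof(buf))) > 0) {
+ *         // consume n bytes from buf
+ *     }
+ *     avio_closep(&in);
+ * }
+ * @endcode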
+ * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_close + */ +int avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with av_free(). + * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). + * @param pause 1 for pause, 0 for resume + */ +int avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +#endif /* AVFORMAT_AVIO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/version.h new file mode 100644 index 0000000..0028c9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavformat/version.h @@ -0,0 +1,76 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFORMAT_VERSION_MAJOR 55 +#define LIBAVFORMAT_VERSION_MINOR 19 +#define LIBAVFORMAT_VERSION_MICRO 104 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_ALLOC_OUTPUT_CONTEXT +#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FORMAT_PARAMETERS +#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_NEW_STREAM +#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_SET_PTS_INFO +#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CLOSE_INPUT_FILE +#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_READ_PACKET +#define FF_API_READ_PACKET (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ASS_SSA +#define FF_API_ASS_SSA (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/avresample.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/avresample.h new file mode 100644 index 0000000..f62e533 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/avresample.h @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_AVRESAMPLE_H +#define AVRESAMPLE_AVRESAMPLE_H + +/** + * @file + * @ingroup lavr + * external API header + */ + +/** + * @defgroup lavr Libavresample + * @{ + * + * Libavresample (lavr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lavr is done through AVAudioResampleContext, which is + * allocated with avresample_alloc_context(). 
It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * AVAudioResampleContext *avr = avresample_alloc_context(); + * av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(avr, "in_sample_rate", 48000, 0); + * av_opt_set_int(avr, "out_sample_rate", 44100, 0); + * av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once the context is initialized, it must be opened with avresample_open(). If + * you need to change the conversion parameters, you must close the context with + * avresample_close(), change the parameters as described above, then reopen it + * again. + * + * The conversion itself is done by repeatedly calling avresample_convert(). + * Note that the samples may get buffered in two places in lavr. The first one + * is the output FIFO, where the samples end up if the output buffer is not + * large enough. The data stored in there may be retrieved at any time with + * avresample_read(). The second place is the resampling delay buffer, + * applicable only when resampling is done. The samples in it require more input + * before they can be processed. Their current amount is returned by + * avresample_get_delay(). At the end of conversion the resampling buffer can be + * flushed by calling avresample_convert() with NULL input. + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_linesize, in_samples; + * + * while (get_input(&input, &in_linesize, &in_samples)) { + * uint8_t *output + * int out_linesize; + * int out_samples = avresample_available(avr) + + * av_rescale_rnd(avresample_get_delay(avr) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, &out_linesize, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = avresample_convert(avr, &output, out_linesize, out_samples, + * input, in_linesize, in_samples); + * handle_output(output, out_linesize, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished and the FIFOs are flushed if required, the + * conversion context and everything associated with it must be freed with + * avresample_free(). + */ + +#include "libavutil/avutil.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavresample/version.h" + +#define AVRESAMPLE_MAX_CHANNELS 32 + +typedef struct AVAudioResampleContext AVAudioResampleContext; + +/** Mixing Coefficient Types */ +enum AVMixCoeffType { + AV_MIX_COEFF_TYPE_Q8, /** 16-bit 8.8 fixed-point */ + AV_MIX_COEFF_TYPE_Q15, /** 32-bit 17.15 fixed-point */ + AV_MIX_COEFF_TYPE_FLT, /** floating-point */ + AV_MIX_COEFF_TYPE_NB, /** Number of coeff types. 
Not part of ABI */ +}; + +/** Resampling Filter Types */ +enum AVResampleFilterType { + AV_RESAMPLE_FILTER_TYPE_CUBIC, /**< Cubic */ + AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + AV_RESAMPLE_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +enum AVResampleDitherMethod { + AV_RESAMPLE_DITHER_NONE, /**< Do not use dithering */ + AV_RESAMPLE_DITHER_RECTANGULAR, /**< Rectangular Dither */ + AV_RESAMPLE_DITHER_TRIANGULAR, /**< Triangular Dither*/ + AV_RESAMPLE_DITHER_TRIANGULAR_HP, /**< Triangular Dither with High Pass */ + AV_RESAMPLE_DITHER_TRIANGULAR_NS, /**< Triangular Dither with Noise Shaping */ + AV_RESAMPLE_DITHER_NB, /**< Number of dither types. Not part of ABI. */ +}; + +/** + * Return the LIBAVRESAMPLE_VERSION_INT constant. + */ +unsigned avresample_version(void); + +/** + * Return the libavresample build-time configuration. + * @return configure string + */ +const char *avresample_configuration(void); + +/** + * Return the libavresample license. + */ +const char *avresample_license(void); + +/** + * Get the AVClass for AVAudioResampleContext. + * + * Can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options + * without allocating a context. + * + * @see av_opt_find(). + * + * @return AVClass for AVAudioResampleContext + */ +const AVClass *avresample_get_class(void); + +/** + * Allocate AVAudioResampleContext and set options. + * + * @return allocated audio resample context, or NULL on failure + */ +AVAudioResampleContext *avresample_alloc_context(void); + +/** + * Initialize AVAudioResampleContext. + * + * @param avr audio resample context + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_open(AVAudioResampleContext *avr); + +/** + * Close AVAudioResampleContext. + * + * This closes the context, but it does not change the parameters. The context + * can be reopened with avresample_open(). It does, however, clear the output + * FIFO and any remaining leftover samples in the resampling delay buffer. If + * there was a custom matrix being used, that is also cleared. + * + * @see avresample_convert() + * @see avresample_set_matrix() + * + * @param avr audio resample context + */ +void avresample_close(AVAudioResampleContext *avr); + +/** + * Free AVAudioResampleContext and associated AVOption values. + * + * This also calls avresample_close() before freeing. + * + * @param avr audio resample context + */ +void avresample_free(AVAudioResampleContext **avr); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libavresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param normalize if 1, coefficients will be normalized to prevent + * overflow. if 0, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. 
dplii) + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, int normalize, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding); + +/** + * Get the current channel mixing matrix. + * + * If no custom matrix has been previously set or the AVAudioResampleContext is + * not open, an error is returned. + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_get_matrix(AVAudioResampleContext *avr, double *matrix, + int stride); + +/** + * Set channel mixing matrix. + * + * Allows for setting a custom mixing matrix, overriding the default matrix + * generated internally during avresample_open(). This function can be called + * anytime on an allocated context, either before or after calling + * avresample_open(), as long as the channel layouts have been set. + * avresample_convert() always uses the current matrix. + * Calling avresample_close() on the context will clear the current matrix. + * + * @see avresample_close() + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_matrix(AVAudioResampleContext *avr, const double *matrix, + int stride); + +/** + * Set a customized input channel mapping. + * + * This function can only be called when the allocated context is not open. + * Also, the input channel layout must have already been set. + * + * Calling avresample_close() on the context will clear the channel mapping. + * + * The map for each input channel specifies the channel index in the source to + * use for that particular channel, or -1 to mute the channel. Source channels + * can be duplicated by using the same index for multiple input channels. + * + * Examples: + * + * Reordering 5.1 AAC order (C,L,R,Ls,Rs,LFE) to FFmpeg order (L,R,C,LFE,Ls,Rs): + * { 1, 2, 0, 5, 3, 4 } + * + * Muting the 3rd channel in 4-channel input: + * { 0, 1, -1, 3 } + * + * Duplicating the left channel of stereo input: + * { 0, 0 } + * + * @param avr audio resample context + * @param channel_map customized input channel mapping + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_channel_mapping(AVAudioResampleContext *avr, + const int *channel_map); + +/** + * Set compensation for resampling. + * + * This can be called anytime after avresample_open(). If resampling is not + * automatically enabled because of a sample rate conversion, the + * "force_resampling" option must have been set to 1 when opening the context + * in order to use resampling compensation. + * + * @param avr audio resample context + * @param sample_delta compensation delta, in samples + * @param compensation_distance compensation distance, in samples + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_compensation(AVAudioResampleContext *avr, int sample_delta, + int compensation_distance); + +/** + * Convert input samples and write them to the output FIFO. 
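+ *
+ * For example, draining the resampling delay buffer at the end of the stream
+ * (sketch, reusing output, out_linesize, out_samples and handle_output() from
+ * the overview example):
+ * @code
+ * int got;
+ * while ((got = avresample_convert(avr, &output, out_linesize, out_samples,
+ *                                  NULL, 0, 0)) > 0)
+ *     handle_output(output, out_linesize, got);
+ * @endcode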
+ * + * The upper bound on the number of output samples is given by + * avresample_available() + (avresample_get_delay() + number of input samples) * + * output sample rate / input sample rate. + * + * The output data can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to avresample_read(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. avresample_get_delay() tells the number of remaining + * samples. To get this data as output, call avresample_convert() with NULL + * input. + * + * At the end of the conversion process, there may be data remaining in the + * internal FIFO buffer. avresample_available() tells the number of remaining + * samples. To get this data as output, either call avresample_convert() with + * NULL input or call avresample_read(). + * + * @see avresample_available() + * @see avresample_read() + * @see avresample_get_delay() + * + * @param avr audio resample context + * @param output output data pointers + * @param out_plane_size output plane size, in bytes. + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * output, which could slow down some conversions. + * @param out_samples maximum number of samples that the output buffer can hold + * @param input input data pointers + * @param in_plane_size input plane size, in bytes + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * input, which could slow down some conversions. + * @param in_samples number of input samples to convert + * @return number of samples written to the output buffer, + * not including converted samples added to the internal + * output FIFO + */ +int avresample_convert(AVAudioResampleContext *avr, uint8_t **output, + int out_plane_size, int out_samples, uint8_t **input, + int in_plane_size, int in_samples); + +/** + * Return the number of samples currently in the resampling delay buffer. + * + * When resampling, there may be a delay between the input and output. Any + * unconverted samples in each call are stored internally in a delay buffer. + * This function allows the user to determine the current number of samples in + * the delay buffer, which can be useful for synchronization. + * + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples currently in the resampling delay buffer + */ +int avresample_get_delay(AVAudioResampleContext *avr); + +/** + * Return the number of available samples in the output FIFO. + * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. The samples in the FIFO can be read with avresample_read() or + * avresample_convert(). + * + * @see avresample_read() + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples available for reading + */ +int avresample_available(AVAudioResampleContext *avr); + +/** + * Read samples from the output FIFO. 
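+ *
+ * For example, reading whatever remains buffered after the last conversion
+ * (sketch, continuing the stereo/S16 setup from the overview example):
+ * @code
+ * int avail = avresample_available(avr);
+ * if (avail > 0) {
+ *     av_samples_alloc(&output, &out_linesize, 2, avail, AV_SAMPLE_FMT_S16, 0);
+ *     avresample_read(avr, &output, avail);
+ *     handle_output(output, out_linesize, avail);
+ *     av_freep(&output);
+ * }
+ * @endcode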
+ * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. This function can be used to read samples from that internal FIFO. + * + * @see avresample_available() + * @see avresample_convert() + * + * @param avr audio resample context + * @param output output data pointers. May be NULL, in which case + * nb_samples of data is discarded from output FIFO. + * @param nb_samples number of samples to read from the FIFO + * @return the number of samples written to output + */ +int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples); + +/** + * @} + */ + +#endif /* AVRESAMPLE_AVRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/version.h new file mode 100644 index 0000000..bd52ca6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavresample/version.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_VERSION_H +#define AVRESAMPLE_VERSION_H + +/** + * @file + * @ingroup lavr + * Libavresample version macros. + */ + +#define LIBAVRESAMPLE_VERSION_MAJOR 1 +#define LIBAVRESAMPLE_VERSION_MINOR 1 +#define LIBAVRESAMPLE_VERSION_MICRO 0 + +#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_VERSION AV_VERSION(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_BUILD LIBAVRESAMPLE_VERSION_INT + +#define LIBAVRESAMPLE_IDENT "Lavr" AV_STRINGIFY(LIBAVRESAMPLE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_RESAMPLE_CLOSE_OPEN +#define FF_API_RESAMPLE_CLOSE_OPEN (LIBAVRESAMPLE_VERSION_MAJOR < 2) +#endif + +#endif /* AVRESAMPLE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/adler32.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/adler32.h new file mode 100644 index 0000000..8c08d2b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/adler32.h @@ -0,0 +1,52 @@ +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler32 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. + * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/aes.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/aes.h new file mode 100644 index 0000000..09efbda --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/aes.h @@ -0,0 +1,65 @@ +/* + * copyright (c) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *av_aes_alloc(void); + +/** + * Initialize an AVAES context. + * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
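+ *
+ * A minimal encryption sketch (the key and data here are placeholders; a real
+ * caller supplies its own):
+ * @code
+ * struct AVAES *aes = av_aes_alloc();
+ * uint8_t key[16] = { 0 };  // 128-bit key material goes here
+ * uint8_t buf[16] = { 0 };  // one 16-byte block of plaintext
+ *
+ * if (aes && av_aes_init(aes, key, 128, 0) >= 0)
+ *     av_aes_crypt(aes, buf, buf, 1, NULL, 0);  // in-place ECB, one block
+ * av_free(aes);
+ * @endcode
+ *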
+ * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/attributes.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/attributes.h new file mode 100644 index 0000000..8c0e5b2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/attributes.h @@ -0,0 +1,160 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. 
+ */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + + +#if defined(__GNUC__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. + */ +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#ifdef __GNUC__ +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audio_fifo.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audio_fifo.h new file mode 100644 index 0000000..903b8f1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audio_fifo.h @@ -0,0 +1,149 @@ +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. 
+ * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. + * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audioconvert.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audioconvert.h new file mode 100644 index 0000000..300a67c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/audioconvert.h @@ -0,0 +1,6 @@ + +#include "version.h" + +#if FF_API_AUDIOCONVERT +#include "channel_layout.h" +#endif diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avassert.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avassert.h new file mode 100644 index 0000000..41f5e0e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avassert.h @@ -0,0 +1,66 @@ +/* + * copyright (c) 2010 Michael Niedermayer + * + * This file is part of FFmpeg. 
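A minimal sketch of the AVAudioFifo API declared above: buffer interleaved 16-bit stereo audio and drain it in fixed 256-sample blocks. The `in`/`out` buffers and sample counts are assumed to come from the caller; none of these names belong to the headers.

#include <stdint.h>
#include <libavutil/audio_fifo.h>
#include <libavutil/samplefmt.h>

static void audio_fifo_sketch(int16_t *in, int in_samples, int16_t *out)
{
    AVAudioFifo *af = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1024);
    if (!af)
        return;
    /* Packed formats use a single data plane, so pass one pointer. */
    av_audio_fifo_write(af, (void **)&in, in_samples);  /* reallocates if full */
    while (av_audio_fifo_size(af) >= 256)
        av_audio_fifo_read(af, (void **)&out, 256);
    av_audio_fifo_free(af);
}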
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. + */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speedloss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#else +#define av_assert2(cond) ((void)0) +#endif + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avconfig.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avconfig.h new file mode 100644 index 0000000..f6685b7 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avconfig.h @@ -0,0 +1,8 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 1 +#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 +#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avstring.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avstring.h new file mode 100644 index 0000000..438ef79 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avstring.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
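The three assert levels above differ only in when they are compiled in, which the short sketch below illustrates; the division helper itself is hypothetical.

#include <libavutil/avassert.h>

static int checked_div(int num, int den)
{
    av_assert0(den != 0);   /* always compiled in, aborts on failure */
    av_assert2(num >= 0);   /* a no-op unless ASSERT_LEVEL > 1 is defined */
    return num / den;
}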
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. + */ +size_t av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. 
+ */ +size_t av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. + * @return the allocated string + * @note You have to free the string yourself with av_free(). + */ +char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to a av_malloced string. + */ +char *av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for av_strtok() to continue scanning the same + * string. saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +int av_isdigit(int c); + +/** + * Locale-independent conversion of ASCII isgraph. + */ +int av_isgraph(int c); + +/** + * Locale-independent conversion of ASCII isspace. + */ +int av_isspace(int c); + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. 
+ */ +int av_isxdigit(int c); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strncasecmp(const char *a, const char *b, size_t n); + + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. + * @return pointer to the basename substring. + */ +const char *av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *av_dirname(char *path); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE 0x01 + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by av_get_token(), such as the single quote. + */ +#define AV_ESCAPE_FLAG_STRICT 0x02 + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see av_bprint_escape() + */ +int av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avutil.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avutil.h new file mode 100644 index 0000000..4692c00 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/avutil.h @@ -0,0 +1,320 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
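A short sketch of the size-bounded copy/append helpers and av_strtok() declared above; the buffer size and the comma-separated input are arbitrary example values.

#include <stdio.h>
#include <libavutil/avstring.h>

static void avstring_sketch(void)
{
    char buf[16];
    char line[] = "video,audio,subtitles";
    char *tok, *save;

    if (av_strlcpy(buf, "limelight", sizeof(buf)) >= sizeof(buf))
        printf("copy was truncated\n");        /* return value is strlen(src) */
    av_strlcat(buf, "-ios", sizeof(buf));

    for (tok = av_strtok(line, ",", &save); tok;
         tok = av_strtok(NULL, ",", &save))
        printf("token: %s\n", tok);
}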
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * external API header + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref lsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version + * number is incremented with backward incompatible changes - e.g. removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu Common utility functions + * + * @brief + * libavutil contains the code shared across all the other FFmpeg + * libraries + * + * @note In order to use the functions provided by avutil you must include + * the specific header. 
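The versioning rules described above are typically consumed as compile-time and run-time checks, as in the sketch below; the 52.0.0 cutoff is only an example value, not a requirement of this project.

#include <libavutil/avutil.h>
#include <libavutil/version.h>

/* Refuse to build against a libavutil older than an assumed baseline. */
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 0, 0)
#error "libavutil is too old for this example"
#endif

/* Detect header/library skew at run time. */
static int avutil_matches_headers(void)
{
    return avutil_version() == LIBAVUTIL_VERSION_INT;
}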
+ * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Maths + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup lavu_internal Internal + * + * Not exported functions, for internal usage only + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. + */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. + */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1< + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. + */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/blowfish.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/blowfish.h new file mode 100644 index 0000000..0b00453 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/blowfish.h @@ -0,0 +1,77 @@ +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. 
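A round-trip sketch for the base64 helpers declared above; the four-byte payload and buffer sizes are arbitrary illustration values.

#include <stdint.h>
#include <libavutil/base64.h>

static void base64_sketch(void)
{
    const uint8_t raw[] = { 0xde, 0xad, 0xbe, 0xef };
    char enc[AV_BASE64_SIZE(sizeof(raw))];   /* includes the NUL terminator */
    uint8_t dec[16];                         /* >= 3/4 of the encoded length */

    if (av_base64_encode(enc, sizeof(enc), raw, sizeof(raw)))
        av_base64_decode(dec, enc, sizeof(dec));
}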
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Initialize an AVBlowfish context. + * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bprint.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bprint.h new file mode 100644 index 0000000..839ec1e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bprint.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
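An in-place ECB round trip with the Blowfish API above; the key and the single 8-byte block are illustration values only.

#include <stdint.h>
#include <libavutil/blowfish.h>

static void blowfish_sketch(void)
{
    AVBlowfish ctx;
    static const uint8_t key[] = "example key";
    uint8_t block[8] = "8 bytes";            /* exactly one 8-byte block */

    av_blowfish_init(&ctx, key, sizeof(key) - 1);
    av_blowfish_crypt(&ctx, block, block, 1, NULL, 0);  /* encrypt, ECB */
    av_blowfish_crypt(&ctx, block, block, 1, NULL, 1);  /* decrypt back */
}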
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ +#define FF_PAD_STRUCTURE(size, ...) \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct { __VA_ARGS__ })]; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ +typedef struct AVBPrint { + FF_PAD_STRUCTURE(1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; + ) +} AVBPrint; + +/** + * Convenience macros for special values for av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. 
+ * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. + * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. 
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bswap.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bswap.h new file mode 100644 index 0000000..06f6548 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_BFIN +# include "bfin/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/buffer.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/buffer.h new file mode 100644 index 0000000..b4399fd --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/buffer.h @@ -0,0 +1,274 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- av_buffer_alloc() to just allocate a new buffer, and + * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with av_buffer_ref(). + * Use av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The av_buffer_is_writable() function is + * provided to check whether this is true and av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. 
+ * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. + * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *av_buffer_alloc(int size); + +/** + * Same as av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. + * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls av_free() on the buffer data. + * This function is meant to be passed to av_buffer_create(), not called + * directly. + */ +void av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until av_buffer_ref() is called on buf. + */ +int av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by av_buffer_create. 
+ */ +void *av_buffer_get_opaque(const AVBufferRef *buf); + +int av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. On failure, buf is left untouched. + * @return 0 on success, a negative AVERROR on failure. + */ +int av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with av_realloc() only if it was + * initially allocated through av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to + * get a reference to a new buffer, similar to av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent av_buffer_pool_get() calls. + * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. + * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with av_buffer_pool_init() and freed with + * av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. 
Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. It will be set to NULL. + * @see av_buffer_pool_can_uninit() + */ +void av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/channel_layout.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/channel_layout.h new file mode 100644 index 0000000..ba4f96d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/channel_layout.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2006 Michael Niedermayer + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. + * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. 
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. */ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel convenience macros + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_NB +}; + +/** + * @} + */ + +/** + * Return a channel layout id that matches name, or 0 if no match is found. 
+ * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, optionally followed by 'c', yielding + * the default channel layout for that number of channels (@see + * av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * @warning Starting from the next major bump the trailing character + * 'c' to specify a number of channels will be required, while a + * channel layout mask could also be specified as a decimal number + * (if and only if not followed by "c"). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t av_get_channel_layout(const char *name); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. + * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/common.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/common.h new file mode 100644 index 0000000..b1203ad --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/common.h @@ -0,0 +1,464 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+#   define AV_NE(be, le) (be)
+#else
+#   define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* assume a>0 and b>0 */
+#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+                                                       : ((a) + (1<<(b)) - 1) >> (b))
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+
+/**
+ * Reverse the order of the bits of an 8-bits unsigned integer.
+ */
+#if FF_API_AV_REVERSE
+extern attribute_deprecated const uint8_t av_reverse[256];
+#endif
+
+#ifdef HAVE_AV_CONFIG_H
+#   include "config.h"
+#   include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+    if (a&(~0xFF)) return (-a)>>31;
+    else           return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+    if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+    else                  return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+    if (a&(~0xFFFF)) return (-a)>>31;
+    else             return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+    if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+    else                      return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+    if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
+    else                                         return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+    else                   return  a;
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param  a one value
+ * @param  b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+    return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param  a first value
+ * @param  b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+    return av_sat_add32(a, av_sat_add32(a, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if      (a < amin) return amin;
+    else if (a > amax) return amax;
+    else               return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. + */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= GET_BYTE;\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= GET_BYTE - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. + */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/cpu.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/cpu.h new file mode 100644 index 0000000..55c3ec9 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/cpu.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +// #if LIBAVUTIL_VERSION_MAJOR <52 +#define AV_CPU_FLAG_CMOV 0x1001000 ///< supports cmov instruction +// #else +// #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +// #endif +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) + +/** + * Return the flags which specify extensions supported by the CPU. + * The returned value is affected by av_force_cpu_flags() if that was used + * before. So av_get_cpu_flags() can easily be used in a application to + * detect the enabled cpu flags. + */ +int av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + * + * @warning this function is not thread safe. + */ +attribute_deprecated void av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. 
+ * + * @return negative on error. + */ +int av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int av_cpu_count(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/crc.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/crc.h new file mode 100644 index 0000000..f4219ca --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/crc.h @@ -0,0 +1,85 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include +#include +#include "attributes.h" + +/** + * @defgroup lavu_crc32 CRC32 + * @ingroup lavu_crypto + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_24_IEEE = 12, + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. + * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see av_crc_init() "le" parameter + */ +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/dict.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/dict.h new file mode 100644 index 0000000..38f03a4 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/dict.h @@ -0,0 +1,152 @@ +/* + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. + * + * @code + * AVDictionary *d = NULL; // "create" an empty dictionary + * av_dict_set(&d, "foo", "bar", 0); // add an entry + * + * char *k = av_strdup("key"); // if your strings are already allocated, + * char *v = av_strdup("value"); // you can avoid copying them like this + * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + * + * AVDictionaryEntry *t = NULL; + * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + * <....> // iterate over all entries in d + * } + * + * av_dict_free(&d); + * @endcode + * + */ + +#define AV_DICT_MATCH_CASE 1 +#define AV_DICT_IGNORE_SUFFIX 2 +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() and children. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() and chilren. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param flags Allows case as well as suffix-insensitive comparisons. + * @return Found entry or NULL, changing key or value leads to undefined behavior. + */ +AVDictionaryEntry * +av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. 
+ * + * @param m dictionary + * @return number of entries in dictionary + */ +int av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will be av_strduped depending on flags) + * @param value entry value to add to *pm (will be av_strduped depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Parse the key/value pairs list and add to a dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + */ +void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void av_dict_free(AVDictionary **m); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/error.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/error.h new file mode 100644 index 0000000..f3fd7bb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/error.h @@ -0,0 +1,117 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include +#include + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. 
*/ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. + +#define AV_ERROR_MAX_STRING_SIZE 64 + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. + * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. + * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/eval.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/eval.h new file mode 100644 index 0000000..6159b0f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/eval.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2002 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. + * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with av_expr_free() by the user + * when it is not needed anymore. 
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with av_expr_parse(). + */ +void av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value for + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/fifo.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/fifo.h new file mode 100644 index 0000000..849b9a6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/fifo.h @@ -0,0 +1,144 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc(unsigned int size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void av_fifo_free(AVFifoBuffer *f); + +/** + * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int av_fifo_size(AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int av_fifo_space(AVFifoBuffer *f); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. 
+ * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/file.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/file.h new file mode 100644 index 0000000..a7364fe --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/file.h @@ -0,0 +1,66 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * The returned buffer must be released with av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by av_file_map() + */ +void av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. + * @return file descriptor of opened file (or -1 on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. 
+ */ +int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/frame.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/frame.h new file mode 100644 index 0000000..1c785dd --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/frame.h @@ -0,0 +1,659 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include + +#include "libavcodec/version.h" + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" + +enum AVColorSpace{ + AVCOL_SPC_RGB = 0, + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_FCC = 4, + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above + AVCOL_SPC_SMPTE240M = 7, + AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_NB , ///< Not part of ABI +}; +#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG + +enum AVColorRange{ + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB , ///< Not part of ABI +}; + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, +}; + +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. 
In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * Similarly fields that are marked as to be only accessed by + * av_opt_ptr() can be reordered. This allows 2 forks to add fields + * without breaking compatibility with each other. + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiplies of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + uint8_t *base[AV_NUM_DATA_POINTERS]; +#endif + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + */ + int64_t pkt_pts; + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. 
+ */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int reference; + + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + /** + * mbskip_table[mb]>=1 if MB didn't change + * stride= mb_width = (width+15)>>4 + */ + attribute_deprecated + uint8_t *mbskip_table; + + /** + * motion vector table + * @code + * example: + * int mv_sample_log2= 4 - motion_subsample_log2; + * int mb_width= (width+15)>>4; + * int mv_stride= (mb_width << mv_sample_log2) + 1; + * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; + * @endcode + */ + attribute_deprecated + int16_t (*motion_val[2])[2]; + + /** + * macroblock type table + * mb_type_base + mb_width + 2 + */ + attribute_deprecated + uint32_t *mb_type; + + /** + * DCT coefficients + */ + attribute_deprecated + short *dct_coeff; + + /** + * motion reference frame index + * the order in which these are stored can depend on the codec. + */ + attribute_deprecated + int8_t *ref_index[2]; +#endif + + /** + * for some private data of the user + */ + void *opaque; + + /** + * error + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int type; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int buffer_hints; + + /** + * Pan scan. + */ + attribute_deprecated + struct AVPanScan *pan_scan; +#endif + + /** + * reordered opaque 64bit (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + +#if FF_API_AVFRAME_LAVC + /** + * @deprecated this field is unused + */ + attribute_deprecated void *hwaccel_picture_private; + + attribute_deprecated + struct AVCodecContext *owner; + attribute_deprecated + void *thread_opaque; + + /** + * log2 of the size of the block which a single vector in motion_val represents: + * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) + */ + attribute_deprecated + uint8_t motion_subsample_log2; +#endif + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. 
For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * Code outside libavcodec should access this field using: + * av_frame_get_best_effort_timestamp(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_pos(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_duration(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * Code outside libavcodec should access this field using: + * av_frame_get_metadata(frame) + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * Code outside libavcodec should access this field using: + * av_frame_get_decode_error_flags(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * Code outside libavcodec should access this field using: + * av_frame_get_channels(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. It must be accessed using av_frame_get_pkt_size() and + * av_frame_set_pkt_size(). + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + + /** + * YUV colorspace type. + * It must be accessed using av_frame_get_colorspace() and + * av_frame_set_colorspace(). + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * It must be accessed using av_frame_get_color_range() and + * av_frame_set_color_range(). 
+ * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + + /** + * Not to be accessed directly from outside libavutil + */ + AVBufferRef *qp_table_buf; +} AVFrame; + +/** + * Accessors for some AVFrame fields. + * The position of these field in the structure is not part of the ABI, + * they should not be accessed directly outside libavcodec. + */ +int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); +void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_duration (const AVFrame *frame); +void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_pos (const AVFrame *frame); +void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +int64_t av_frame_get_channel_layout (const AVFrame *frame); +void av_frame_set_channel_layout (AVFrame *frame, int64_t val); +int av_frame_get_channels (const AVFrame *frame); +void av_frame_set_channels (AVFrame *frame, int val); +int av_frame_get_sample_rate (const AVFrame *frame); +void av_frame_set_sample_rate (AVFrame *frame, int val); +AVDictionary *av_frame_get_metadata (const AVFrame *frame); +void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +int av_frame_get_decode_error_flags (const AVFrame *frame); +void av_frame_set_decode_error_flags (AVFrame *frame, int val); +int av_frame_get_pkt_size(const AVFrame *frame); +void av_frame_set_pkt_size(AVFrame *frame, int val); +AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); +int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame); +void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); +enum AVColorRange av_frame_get_color_range(const AVFrame *frame); +void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Setup a new reference to the data described by a given frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. 
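A minimal lifecycle sketch tying together av_frame_alloc(), av_frame_clone() and av_frame_free() from above with av_frame_get_buffer() and av_frame_unref() documented just below; AV_PIX_FMT_YUV420P is assumed to come from pixfmt.h and is not part of this header:

static int make_yuv_frame(AVFrame **out)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return -1;
    frame->format = AV_PIX_FMT_YUV420P;   /* assumed pixel-format constant from pixfmt.h */
    frame->width  = 1280;
    frame->height = 720;
    if (av_frame_get_buffer(frame, 32) < 0) {   /* fills data[], linesize[] and buf[] */
        av_frame_free(&frame);
        return -1;
    }
    *out = frame;   /* reference counted: av_frame_clone() adds a reference, av_frame_free() drops one */
    return 0;
}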
+ */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everythnig contained in src to dst and reset src. + */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. + * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +#endif /* AVUTIL_FRAME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/hmac.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/hmac.h new file mode 100644 index 0000000..d36d4de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/hmac.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include + +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224 = 10, + AV_HMAC_SHA256, + AV_HMAC_SHA384, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. + * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/imgutils.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/imgutils.h new file mode 100644 index 0000000..ab32d66 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/imgutils.h @@ -0,0 +1,200 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
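A minimal sketch of the AVHMAC API declared above; key, key_len, msg and msg_len stand for caller-supplied buffers and are not part of the header:

AVHMAC *h = av_hmac_alloc(AV_HMAC_SHA256);
uint8_t mac[32];                      /* SHA-256 produces a 32-byte tag */
if (!h)
    return -1;
av_hmac_init(h, key, key_len);        /* secret key */
av_hmac_update(h, msg, msg_len);      /* may be called repeatedly for streamed data */
int written = av_hmac_final(h, mac, sizeof(mac));
av_hmac_free(h);

For a single contiguous buffer, av_hmac_calc() performs the same init/update/final sequence in one call.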
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. 
+ * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. + * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param[in] align the assumed linesize alignment + */ +int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. + * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesizes linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. 
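A minimal allocation sketch using av_image_alloc() and av_freep() as documented above; AV_PIX_FMT_YUV420P is assumed to come from pixfmt.h and the dimensions are arbitrary:

uint8_t *data[4];
int      linesize[4];
int ret = av_image_alloc(data, linesize, 1280, 720, AV_PIX_FMT_YUV420P, 32);
if (ret < 0)
    return ret;                 /* on success, ret is the total buffer size in bytes */
/* ... write pixels into data[0..2], stepping each row by linesize[plane] ... */
av_freep(&data[0]);             /* a single free releases all planes, as noted above */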
+ * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat_readwrite.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat_readwrite.h new file mode 100644 index 0000000..9709f4d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intfloat_readwrite.h @@ -0,0 +1,40 @@ +/* + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
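The av_int2float()/av_float2int() helpers above reinterpret bit patterns rather than convert values; a short sketch with the expected IEEE-754 encodings:

uint32_t bits  = av_float2int(1.0f);   /* 0x3F800000 */
float    f     = av_int2float(bits);   /* exactly 1.0f again */
uint64_t dbits = av_double2int(1.0);   /* 0x3FF0000000000000 */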
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_READWRITE_H +#define AVUTIL_INTFLOAT_READWRITE_H + +#include +#include "attributes.h" + +/* IEEE 80 bits extended float */ +typedef struct AVExtFloat { + uint8_t exponent[2]; + uint8_t mantissa[8]; +} AVExtFloat; + +attribute_deprecated double av_int2dbl(int64_t v) av_const; +attribute_deprecated float av_int2flt(int32_t v) av_const; +attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const; +attribute_deprecated int64_t av_dbl2int(double d) av_const; +attribute_deprecated int32_t av_flt2int(float d) av_const; +attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const; + +#endif /* AVUTIL_INTFLOAT_READWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intreadwrite.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intreadwrite.h new file mode 100644 index 0000000..7ee6977 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/intreadwrite.h @@ -0,0 +1,621 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. 
+ */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) +# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + +/* + * Define AV_[RW]N helper 
macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + 
((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + 
((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. + */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. + */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lfg.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lfg.h new file mode 100644 index 0000000..ec90562 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lfg.h @@ -0,0 +1,62 @@ +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. 
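A short sketch of the byte-order macros defined above; the values only illustrate big- versus little-endian reads and unaligned writes:

uint8_t buf[8] = { 0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0 };
uint32_t be = AV_RB32(buf);     /* 0x12345678 */
uint32_t le = AV_RL32(buf);     /* 0x78563412 */
AV_WL16(buf + 4, 0xBEEF);       /* buf[4] = 0xEF, buf[5] = 0xBE */
AV_WB24(buf, 0x010203);         /* buf[0..2] = 01 02 03 */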
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. + * + * @param out array where the two generated numbers are placed + */ +void av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/log.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/log.h new file mode 100644 index 0000000..55459e8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/log.h @@ -0,0 +1,313 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
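A minimal sketch of the lagged Fibonacci generator declared above; the seed is arbitrary:

AVLFG prng;
av_lfg_init(&prng, 0xDEADBEEF);
unsigned int r = av_lfg_get(&prng);   /* uniform 32-bit value */
double gauss[2];
av_bmg_get(&prng, gauss);             /* two normally distributed samples */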
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_NB, ///< not part of ABI/API +}AVClassCategory; + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. + */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. + * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. 
+ * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. + */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param ap The arguments referenced by the format string. 
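A minimal logging sketch using av_log() and av_log_set_level() as documented above; avctx stands for any struct whose first member is an AVClass pointer (an AVCodecContext, for example) and n is a caller variable:

av_log_set_level(AV_LOG_VERBOSE);                      /* raise the threshold for this process */
av_log(NULL, AV_LOG_INFO, "decoded %d frames\n", n);   /* a NULL context is allowed */
av_log(avctx, AV_LOG_WARNING, "missing reference frame\n");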
+ */ +void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* av_default_item_name(void* ctx); +AVClassCategory av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formated line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * av_dlog macros + * Useful to print debug messages that shouldn't get compiled in normally. + */ + +#ifdef DEBUG +# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif + +/** + * Skip repeated messages, this requires the user app to use av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 +void av_log_set_flags(int arg); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lzo.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lzo.h new file mode 100644 index 0000000..c034039 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. 
+ * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. + */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mathematics.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mathematics.h new file mode 100644 index 0000000..71f0392 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mathematics.h @@ -0,0 +1,147 @@ +/* + * copyright (c) 2005-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include +#include +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * @{ + */ + + +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE +}; + +/** + * Return the greatest common divisor of a and b. + * If both a and b are 0 or either or both are <0 then behavior is + * undefined. + */ +int64_t av_const av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * A simple a*b/c isn't possible as it can overflow. + */ +int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. 
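A short sketch of av_rescale() and av_rescale_rnd() as documented above, converting a 90 kHz timestamp (pts, a caller-supplied int64_t) to milliseconds:

int64_t ms  = av_rescale(pts, 1000, 90000);                      /* rounds to nearest */
int64_t ms2 = av_rescale_rnd(pts, 1000, 90000, AV_ROUND_DOWN);   /* explicit rounding mode */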
+ * A simple a*b/c isn't possible as it can overflow. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding) av_const; + +/** + * Compare 2 timestamps each in its own timebases. + * The result of the function is undefined if one of the timestamps + * is outside the int64_t range when represented in the others timebase. + * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position + */ +int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare 2 integers modulo mod. + * That is we compare integers a and b for which only the least + * significant log2(mod) bits are known. + * + * @param mod must be a power of 2 + * @return a negative value if a is smaller than b + * a positive value if a is greater than b + * 0 if a equals b + */ +int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * @param in_ts Input timestamp + * @param in_tb Input timesbase + * @param fs_tb Duration and *last timebase + * @param duration duration till the next call + * @param out_tb Output timesbase + */ +int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/md5.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/md5.h new file mode 100644 index 0000000..79702c8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/md5.h @@ -0,0 +1,81 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size av_md5_size) + */ +void av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. 
+ * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. + * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mem.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mem.h new file mode 100644 index 0000000..77b7adc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/mem.h @@ -0,0 +1,342 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include +#include + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * @{ + */ + + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__TI_COMPILER_VERSION__) + #define DECLARE_ALIGNED(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + t __attribute__((aligned(n))) v + #define DECLARE_ASM_CONST(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + static const t __attribute__((aligned(n))) v +#elif defined(__GNUC__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + #define av_malloc_attrib +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU). + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. 
+ * @see av_mallocz() + */ +void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of size * nmemb bytes with av_malloc(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_malloc() + */ +av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_malloc(nmemb * size); +} + +/** + * Allocate or reallocate a block of memory. + * If ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param size Size in bytes of the memory block to be allocated or + * reallocated. + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + * @see av_fast_realloc() + */ +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate or reallocate a block of memory. + * This function does the same thing as av_realloc, except: + * - It takes two arguments and checks the result of the multiplication for + * integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic "buf = realloc(buf); if (!buf) return -1;". + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate or reallocate a block of memory. + * If *ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param size Size in bytes for the memory block to be allocated or + * reallocated + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_reallocp(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +int av_reallocp(void *ptr, size_t size); + +/** + * Allocate or reallocate an array. + * If ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). 
The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate or reallocate an array through a pointer to a pointer. + * If *ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc(). + * @param ptr Pointer to the memory block which should be freed. + * @note ptr = NULL is explicitly allowed. + * @note It is recommended that you use av_freep() instead. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if it cannot be allocated. + * @see av_malloc() + */ +void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of nmemb * size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * The allocation will fail if nmemb * size is greater than or equal + * to INT_MAX. + * @param nmemb + * @param size + * @return Pointer to the allocated block, NULL if it cannot be allocated. + */ +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate a block of size * nmemb bytes with av_mallocz(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_mallocz() + * @see av_malloc_array() + */ +av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_mallocz(nmemb * size); +} + +/** + * Duplicate the string s. + * @param s string to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of s or NULL if the string cannot be allocated. + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate the buffer p. + * @param p buffer to be duplicated + * @return Pointer to a newly allocated buffer containing a + * copy of p or NULL if the buffer cannot be allocated. 
+ */ +void *av_memdup(const void *p, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc() and set the pointer pointing to it to NULL. + * @param ptr Pointer to the pointer to the memory block which should + * be freed. + * @see av_free() + */ +void av_freep(void *ptr); + +/** + * Add an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem element to add + * @see av_dynarray2_add() + */ +void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size elem_size to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem_size size in bytes of the elements in the array + * @param elem_data pointer to the data of the element to add. If NULL, the space of + * the new added element is not filled. + * @return pointer to the data of the element to copy in the new allocated space. + * If NULL, the new allocated space is left uninitialized." + * @see av_dynarray_add() + */ +void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * Multiply two size_t values checking for overflow. + * @return 0 if success, AVERROR(EINVAL) if overflow. + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: only try the division if nelem and elsize + * are both greater than sqrt(SIZE_MAX). */ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may me allocated in one block. + */ +void av_max_alloc(size_t max); + +/** + * deliberately overlapping memcpy implementation + * @param dst destination buffer + * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 + * @param cnt number of bytes to copy, must be >= 0 + * + * cnt > back is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of back. 
+ */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/murmur3.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/murmur3.h new file mode 100644 index 0000000..f29ed97 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/murmur3.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include + +struct AVMurMur3 *av_murmur3_alloc(void); +void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); +void av_murmur3_init(struct AVMurMur3 *c); +void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/old_pix_fmts.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/old_pix_fmts.h new file mode 100644 index 0000000..3ee8aec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/old_pix_fmts.h @@ -0,0 +1,175 @@ +/* + * copyright (c) 2006-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OLD_PIX_FMTS_H +#define AVUTIL_OLD_PIX_FMTS_H + +/* + * This header exists to prevent new pixel formats from being accidentally added + * to the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVPixelFormat enum instead. + */ + PIX_FMT_NONE = AV_PIX_FMT_NONE, + PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + PIX_FMT_GRAY8, ///< Y , 8bpp + PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + PIX_FMT_XVMC_MPEG2_IDCT, + PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
+ + PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), 
little-endian + PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus + //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately + //is better + PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian + PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian + PIX_FMT_GBRP10BE, ///< planar GBR 
4:4:4 30bpp, big endian + PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian + PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian + PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian + PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian + PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian + PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian + + PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +#endif /* AVUTIL_OLD_PIX_FMTS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/opt.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/opt.h new file mode 100644 index 0000000..14faa6e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/opt.h @@ -0,0 +1,757 @@ +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. + * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, av_opt_set_defaults() can be called to + * initialize defaults. After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the av_opt_free() function to automatically + * free all the allocated string and binary options. 
+ * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = av_malloc(sizeof(*ret)); + * ret->class = &test_class; + * av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * av_opt_free(*foo); + * av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. 
AVCodecContext in libavcodec or + * AVFormatContext in libavformat. + * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are av_opt_next(), which iterates + * over all options defined for one object, and av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call av_opt_child_next() recursively (and + * av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to av_opt_set() is enough. For + * non-string type options, av_opt_set() will parse the string according to the + * option type. + * + * Similarly av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. This allows to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. + */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_CONST = 128, + AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), + AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), + AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational + AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), + AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), +#if FF_API_OLD_AVOPTIONS + FF_OPT_TYPE_FLAGS = 0, + FF_OPT_TYPE_INT, + FF_OPT_TYPE_INT64, + FF_OPT_TYPE_DOUBLE, + FF_OPT_TYPE_FLOAT, + FF_OPT_TYPE_STRING, + FF_OPT_TYPE_RATIONAL, + FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + FF_OPT_TYPE_CONST=128, +#endif +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. 
+ */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + double value_min, value_max; ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count + double component_min, component_max; ///< For string this represents the unicode range for chars, 0-127 limits to ASCII + int is_range; ///< if set to 1 the struct encodes a range, if set to 0 a single value +} AVOptionRange; + +/** + * List of AVOptionRange structs + */ +typedef struct AVOptionRanges { + AVOptionRange **range; + int nb_ranges; +} AVOptionRanges; + + +#if FF_API_FIND_OPT +/** + * Look for an option in obj. Look only for the options which + * have the flags set as specified in mask and flags (that is, + * for which it is the case that (opt->flags & mask) == flags). + * + * @param[in] obj a pointer to a struct whose first element is a + * pointer to an AVClass + * @param[in] name the name of the option to look for + * @param[in] unit the unit of the option to look for, or any if NULL + * @return a pointer to the option found, or NULL if no option + * has been found + * + * @deprecated use av_opt_find. + */ +attribute_deprecated +const AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags); +#endif + +#if FF_API_OLD_AVOPTIONS +/** + * Set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an + * AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. If the field is not of a string + * type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. 
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption + * found + * @param alloc this parameter is currently ignored + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + * @deprecated use av_opt_set() + */ +attribute_deprecated +int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out); + +attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n); +attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n); +attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n); + +double av_get_double(void *obj, const char *name, const AVOption **o_out); +AVRational av_get_q(void *obj, const char *name, const AVOption **o_out); +int64_t av_get_int(void *obj, const char *name, const AVOption **o_out); +attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len); +attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last); +#endif + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void av_opt_set_defaults(void *s); + +#if FF_API_OLD_AVOPTIONS +attribute_deprecated +void av_opt_set_defaults2(void *s, int mask, int flags); +#endif + +/** + * Parse the key/value pairs list in opts. For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + */ +int av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. 
+ * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all string and binary options in obj. + */ +void av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see av_dict_copy() + */ +int av_opt_set_dict(void *obj, struct AVDictionary **options); + +/** + * Extract a key-value pair from the beginning of a string. + * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using av_free() + * @param rval parsed value; must be freed using av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. 
+ */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. + */ +int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the + given object first. */ +/** + * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ 0x0002 + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with av_set_string3(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. 
+ * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. + * @param prev result of the previous call to av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *av_opt_next(void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. + * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. + * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int av_opt_set_double(void *obj, const char *name, double val, int search_flags); +int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); + +/** + * Set a binary option to an integer list. 
+ * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \ + AVERROR(EINVAL) : \ + av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller + */ +int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val); +int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_freep_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. 
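A usage sketch (illustrative, not part of the header) of the setter and getter families declared above. ctx stands for a hypothetical AVOptions-enabled struct; the option names "video_size" and "b" are placeholders.

#include <libavutil/opt.h>
#include <libavutil/mem.h>

static int configure(void *ctx)
{
    uint8_t *val = NULL;
    int ret;

    /* String form: the value is parsed according to the option's type. */
    if ((ret = av_opt_set(ctx, "video_size", "1280x720", AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;
    /* Typed form: no string parsing involved. */
    if ((ret = av_opt_set_int(ctx, "b", 4000000, AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;

    /* Read an option back as a string; the result is av_malloc()ed. */
    if ((ret = av_opt_get(ctx, "video_size", 0, &val)) >= 0)
        av_free(val);
    return ret;
}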
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_free_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/parseutils.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/parseutils.h new file mode 100644 index 0000000..c80f0de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/parseutils.h @@ -0,0 +1,187 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. + * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. 
+ * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. + * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. + + * @param timestr a string representing a date or a duration. + * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). 
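A usage sketch (illustrative, not part of the header) of the parsing helpers declared above; the literal strings are arbitrary sample inputs.

#include <libavutil/parseutils.h>

static int parse_demo(void)
{
    int w, h, ret;
    AVRational fps;
    uint8_t rgba[4];
    int64_t duration;

    if ((ret = av_parse_video_size(&w, &h, "1280x720")) < 0)
        return ret;
    if ((ret = av_parse_video_rate(&fps, "30000/1001")) < 0)
        return ret;
    /* "red" at 50% opacity; -1 means color_string is null terminated. */
    if ((ret = av_parse_color(rgba, "red@0.5", -1, NULL)) < 0)
        return ret;
    /* Last argument non-zero: parse as a duration, result in microseconds. */
    return av_parse_time(&duration, "00:01:30.5", 1);
}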
+ * + * In particular it actually supports the parameters: + * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this + * function call, or NULL in case the function fails to match all of + * the fmt string and therefore an error occurred + */ +char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixdesc.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixdesc.h new file mode 100644 index 0000000..e88bf9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixdesc.h @@ -0,0 +1,291 @@ +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include + +#include "attributes.h" +#include "pixfmt.h" + +typedef struct AVComponentDescriptor{ + uint16_t plane :2; ///< which of the 4 planes contains the component + + /** + * Number of elements between 2 horizontally consecutive pixels minus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t step_minus1 :3; + + /** + * Number of elements before the component of the first pixel plus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t offset_plus1 :3; + uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value + uint16_t depth_minus1 :4; ///< number of bits in the component minus 1 +}AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor{ + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = -((-luma_width) >> log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w) + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= -((-luma_height) >> log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + uint8_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 2 or 4 components, then alpha is last. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components, + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + */ + AVComponentDescriptor comp[4]; +}AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. + */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) +/** + * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as + * paletted internally, but the palette is generated by the decoder and is not + * stored in the file. + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) +/** + * The pixel format has an alpha channel. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +#if FF_API_PIX_FMT +/** + * @deprecated use the AV_PIX_FMT_FLAG_* flags + */ +#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE +#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL +#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM +#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL +#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR +#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB +#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL +#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA +#endif + +#if FF_API_PIX_FMT_DESC +/** + * The array of all the pixel format descriptors. + */ +extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[]; +#endif + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. 
+ * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. + */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt); + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. 
+ */ +const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * See avcodec_get_chroma_sub_sample() for a function that asserts a + * valid pixel format instead of returning an error code. + * Its recommanded that you use avcodec_get_chroma_sub_sample unless + * you do check the return code! + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +void ff_check_pixfmt_descriptors(void); + +/** + * Utility function to swap the endianness of a pixel format. + * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixfmt.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixfmt.h new file mode 100644 index 0000000..7b17a4f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/pixfmt.h @@ -0,0 +1,397 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + * + */ + +#include "libavutil/avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @par + * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. 
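A usage sketch (illustrative, not part of the header) of the descriptor queries declared above, printing a short summary for one pixel format.

#include <stdio.h>
#include <libavutil/pixdesc.h>

static void describe(enum AVPixelFormat fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    int h_shift, v_shift;

    if (!desc || av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift) < 0)
        return;
    printf("%s: %d plane(s), %d bits/pixel, chroma shift %d/%d\n",
           av_get_pix_fmt_name(fmt), av_pix_fmt_count_planes(fmt),
           av_get_bits_per_pixel(desc), h_shift, v_shift);
}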
The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + * + * @note + * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1 + * and that all newly added little-endian formats have (pix_fmt & 1) == 0. + * This allows simpler detection of big vs little-endian. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... + AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + AV_PIX_FMT_XVMC_MPEG2_IDCT, + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... 
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), 
little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is 
stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + + /** + * duplicated pixel formats for compatibility with libav. + * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55) + * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85) + */ + AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr 
& Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, 
GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions + +#if FF_API_PIX_FMT +#include "old_pix_fmts.h" +#endif +}; + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV +#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV +#endif + + +#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A +#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGRA64 
AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) + +#if FF_API_PIX_FMT +#define PixelFormat AVPixelFormat + +#define PIX_FMT_Y400A AV_PIX_FMT_Y400A +#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P + +#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le) + +#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 +#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1 +#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32 +#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1 +#define PIX_FMT_0RGB32 AV_PIX_FMT_0RGB32 +#define PIX_FMT_0BGR32 AV_PIX_FMT_0BGR32 + +#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16 +#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48 +#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565 +#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555 +#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444 +#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48 +#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565 +#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555 +#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444 + +#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9 +#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9 +#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9 +#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10 +#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10 +#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10 +#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12 +#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12 +#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12 +#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14 +#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14 +#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14 +#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16 +#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16 +#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16 + +#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64 +#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64 +#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9 +#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10 +#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12 +#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14 +#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16 +#endif + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/random_seed.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/random_seed.h new file mode 100644 index 0000000..0462a04 --- 
/dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/random_seed.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2009 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include <stdint.h> +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed on a best-effort basis. + * It is possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as a seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/rational.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/rational.h new file mode 100644 index 0000000..b9800ee --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/rational.h @@ -0,0 +1,155 @@ +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * rational numbers + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include <stdint.h> +#include <limits.h> +#include "attributes.h" + +/** + * @addtogroup lavu_math + * @{ + */ + +/** + * rational number numerator/denominator + */ +typedef struct AVRational{ + int num; ///< numerator + int den; ///< denominator +} AVRational; + +/** + * Compare two rationals. + * @param a first rational + * @param b second rational + * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the + * values is of the form 0/0 + */ +static inline int av_cmp_q(AVRational a, AVRational b){ + const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; + + if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert rational to double. + * @param a rational to convert + * @return (double) a + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * This is useful for framerate calculations.
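A usage sketch (illustrative, not part of the header) of the inline helpers defined above; the 90 kHz time base is an arbitrary example.

#include <libavutil/rational.h>

static double pts_to_seconds(int64_t pts)
{
    AVRational tb = { 1, 90000 };   /* e.g. an MPEG-TS style time base */
    return pts * av_q2d(tb);        /* convert the rational, then scale */
}

static int is_faster(AVRational a, AVRational b)
{
    return av_cmp_q(a, b) > 0;      /* av_cmp_q() returns 1 when a > b */
}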
+ * @param dst_num destination numerator + * @param dst_den destination denominator + * @param num source numerator + * @param den source denominator + * @param max the maximum allowed for dst_num & dst_den + * @return 1 if exact, 0 otherwise + */ +int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b first rational + * @param c second rational + * @return b*c + */ +AVRational av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b first rational + * @param c second rational + * @return b/c + */ +AVRational av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b first rational + * @param c second rational + * @return b+c + */ +AVRational av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b first rational + * @param c second rational + * @return b-c + */ +AVRational av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. + * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * inf is expressed as {1,0} or {-1,0} depending on the sign. + * + * @param d double to convert + * @param max the maximum allowed numerator and denominator + * @return (AVRational) d + */ +AVRational av_d2q(double d, int max) av_const; + +/** + * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer + * than q1, 0 if they have the same distance. + */ +int av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the nearest value in q_list to q. + * @param q_list an array of rationals terminated by {0, 0} + * @return the index of the nearest value found in the array + */ +int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/ripemd.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/ripemd.h new file mode 100644 index 0000000..7b0c8bc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/ripemd.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. 
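A usage sketch (illustrative, not part of the header) of the AVRIPEMD API: av_ripemd_alloc() above plus av_ripemd_init(), av_ripemd_update() and av_ripemd_final() declared just below, producing a 160-bit (20-byte) digest.

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <libavutil/ripemd.h>

static int ripemd160(const uint8_t *buf, unsigned int len, uint8_t digest[20])
{
    struct AVRIPEMD *ctx = av_ripemd_alloc();

    if (!ctx)
        return AVERROR(ENOMEM);
    if (av_ripemd_init(ctx, 160) < 0) {   /* select the 160-bit variant */
        av_free(ctx);
        return AVERROR(EINVAL);
    }
    av_ripemd_update(ctx, buf, len);
    av_ripemd_final(ctx, digest);
    av_free(ctx);
    return 0;
}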
+ * + * @param context pointer to the function context (of size av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/samplefmt.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/samplefmt.h new file mode 100644 index 0000000..db17d43 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/samplefmt.h @@ -0,0 +1,256 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include + +#include "avutil.h" +#include "attributes.h" + +/** + * Audio Sample Formats + * + * @par + * The data described by the sample format is always in native-endian order. + * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * @par + * The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * @par + * The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. 
DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +#if FF_API_GET_BITS_PER_SAMPLE_FMT +/** + * @deprecated Use av_get_bytes_per_sample() instead. + */ +attribute_deprecated +int av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt); +#endif + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. + * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. 
+ * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see av_samples_fill_arrays() + * @see av_samples_alloc_array_and_samples() + */ +int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. + * + * This is the same as av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see av_samples_alloc() + */ +int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. 
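A short, illustrative use of the sample-format and buffer helpers documented above (not part of this patch; the stereo planar-float setup and the 1024-sample count are arbitrary example values):

#include <stdio.h>
#include "libavutil/samplefmt.h"
#include "libavutil/mem.h"

int main(void)
{
    uint8_t *data[2] = { NULL };
    int linesize = 0;

    /* one planar-float buffer: 2 channels x 1024 samples, default alignment */
    if (av_samples_alloc(data, &linesize, 2, 1024, AV_SAMPLE_FMT_FLTP, 0) < 0)
        return 1;

    printf("%s: planar=%d, %d bytes/sample, linesize=%d\n",
           av_get_sample_fmt_name(AV_SAMPLE_FMT_FLTP),
           av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP),
           av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP),
           linesize);

    av_freep(&data[0]);   /* frees the whole sample buffer, as documented above */
    return 0;
}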
+ * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha.h new file mode 100644 index 0000000..bf4377e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha512.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha512.h new file mode 100644 index 0000000..7b08701 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/sha512.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA512 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/time.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/time.h new file mode 100644 index 0000000..90eb436 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/time.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
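The hashing contexts in these headers all follow the same alloc/init/update/final pattern; a minimal SHA-1 sketch is shown below (illustrative only, not part of this patch; av_ripemd_* and av_sha512_* are used the same way with their own digest sizes).

#include <stdio.h>
#include <string.h>
#include "libavutil/sha.h"
#include "libavutil/mem.h"

static void print_sha1(const char *msg)
{
    struct AVSHA *sha = av_sha_alloc();
    uint8_t digest[20];                      /* SHA-1 -> 160-bit digest */
    int i;

    if (!sha)
        return;
    if (av_sha_init(sha, 160) == 0) {
        av_sha_update(sha, (const uint8_t *)msg, strlen(msg));
        av_sha_final(sha, digest);
        for (i = 0; i < (int)sizeof(digest); i++)
            printf("%02x", digest[i]);
        printf("\n");
    }
    av_free(sha);                            /* context came from av_sha_alloc() */
}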
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include + +/** + * Get the current time in microseconds. + */ +int64_t av_gettime(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timecode.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timecode.h new file mode 100644 index 0000000..56e3975 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timecode.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 16 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. 
+ */ +uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). + * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for av_log). + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timestamp.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timestamp.h new file mode 100644 index 0000000..f63a08c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/timestamp.h @@ -0,0 +1,74 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
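A minimal sketch of the timecode API documented above (illustrative only, not part of this patch; the 29.97 drop-frame setup and the frame number 1800 are arbitrary example values):

#include <stdio.h>
#include "libavutil/timecode.h"

int main(void)
{
    AVTimecode tc;
    AVRational rate = { 30000, 1001 };       /* 29.97 fps NTSC */
    char buf[AV_TIMECODE_STR_SIZE];

    if (av_timecode_init(&tc, rate, AV_TIMECODE_FLAG_DROPFRAME, 0, NULL) < 0)
        return 1;

    /* NTSC drop-frame timecode; frame 1800 is roughly one minute in */
    printf("%s\n", av_timecode_make_string(&tc, buf, 1800));
    return 0;
}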
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/version.h new file mode 100644 index 0000000..dd70beb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/version.h @@ -0,0 +1,153 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +/** + * @defgroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +#define AV_PRAGMA(s) _Pragma(#s) + +/** + * @} + */ + +/** + * @defgroup version_utils Library Version Macros + * + * Useful to check and match library version in order to maintain + * backward compatibility. 
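The av_ts2str()/av_ts2timestr() convenience macros above are typically used inline in log statements; a small illustrative helper follows (not part of this patch; the "pts:" field names are just an example format).

#include <stdio.h>
#include <stdint.h>
#include "libavutil/rational.h"
#include "libavutil/timestamp.h"

/* Print a timestamp both in raw time-base units and in seconds. */
static void log_pts(int64_t pts, AVRational time_base)
{
    printf("pts:%s pts_time:%s\n",
           av_ts2str(pts), av_ts2timestr(pts, &time_base));
}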
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * @} + */ + + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 52 +#define LIBAVUTIL_VERSION_MINOR 48 +#define LIBAVUTIL_VERSION_MICRO 101 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @} + * + * @defgroup depr_guards Deprecation guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @{ + */ + +#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT +#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_FIND_OPT +#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_OLD_AVOPTIONS +#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT +#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CONTEXT_SIZE +#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT_DESC +#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AV_REVERSE +#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AUDIOCONVERT +#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CPU_FLAG_MMX2 +#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_SAMPLES_UTILS_RETURN_ZERO +#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS_PRIVATE +#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS1 +#define FF_API_LLS1 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AVFRAME_LAVC +#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT +#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif + +/** + * @} + */ + +#endif /* AVUTIL_VERSION_H */ + diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/xtea.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/xtea.h new file mode 100644 index 0000000..0899c92 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libavutil/xtea.h @@ -0,0 +1,62 @@ +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
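The version macros above are meant for exactly this kind of build- and run-time check; a small sketch (illustrative only, not part of this patch; the 52.48.101 threshold simply mirrors the values in this header):

#include <stdio.h>
#include "libavutil/version.h"
#include "libavutil/avutil.h"    /* avutil_version() for the run-time value */

/* Fail the build if the headers are older than the version targeted here. */
#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 48, 101)
#error "libavutil headers too old"
#endif

int main(void)
{
    printf("built against %s, running with %u\n",
           AV_STRINGIFY(LIBAVUTIL_VERSION), avutil_version());
    return 0;
}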
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include + +/** + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption + */ +void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/swresample.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/swresample.h new file mode 100644 index 0000000..3811301 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/swresample.h @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr Libswresample + * @{ + * + * Libswresample (lswr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. 
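As an illustration of the XTEA API above, a single-block ECB round trip might look like the following (not part of this patch; the key and plaintext bytes are arbitrary test data).

#include <stdio.h>
#include <string.h>
#include "libavutil/xtea.h"

int main(void)
{
    AVXTEA ctx;
    static const uint8_t key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    uint8_t plain[8]  = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
    uint8_t cipher[8], decoded[8];

    av_xtea_init(&ctx, key);
    av_xtea_crypt(&ctx, cipher, plain, 1, NULL, 0);    /* ECB encrypt, 1 block */
    av_xtea_crypt(&ctx, decoded, cipher, 1, NULL, 1);  /* ECB decrypt          */

    printf("round trip %s\n", memcmp(plain, decoded, 8) ? "failed" : "ok");
    return 0;
}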
+ * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * SwrContext *swr = swr_alloc(); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once all values have been set, it must be initialized with swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * as described above, or by using swr_alloc_set_opts(), then call swr_init() + * again. + * + * The conversion itself is done by repeatedly calling swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * swr_convert() with NULL in and 0 in_count. + * + * The delay between input and output, can at any time be found by using + * swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with swr_free(). + * There will be no memory leak if the data is not completely flushed before + * swr_free(). + */ + +#include +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +#if LIBSWRESAMPLE_VERSION_MAJOR < 1 +#define SWR_CH_MAX 32 ///< Maximum number of channels +#endif + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
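To complement the conversion loop in the header documentation above, here is a minimal end-of-stream sketch (illustrative only, not part of this patch): once input runs dry, the samples still buffered inside the resampler are drained by passing NULL input with in_count 0, and swr_get_delay() reports how much is left. handle_output() is the same caller-defined sink used in the header's own example, and out_rate/out_ch stand in for the 44.1 kHz stereo S16 setup shown there.

#include "libswresample/swresample.h"
#include "libavutil/samplefmt.h"
#include "libavutil/mem.h"

static void drain_resampler(SwrContext *swr, int out_rate, int out_ch,
                            void (*handle_output)(uint8_t *, int))
{
    while (swr_get_delay(swr, out_rate) > 0) {
        uint8_t *output = NULL;
        int out_samples;

        if (av_samples_alloc(&output, NULL, out_ch, 4096,
                             AV_SAMPLE_FMT_S16, 0) < 0)
            break;
        /* NULL input with in_count 0 flushes buffered samples */
        out_samples = swr_convert(swr, &output, 4096, NULL, 0);
        if (out_samples > 0)
            handle_output(output, out_samples);
        av_freep(&output);
        if (out_samples <= 0)
            break;
    }
}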
+ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for swrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *swr_get_class(void); + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with swr_alloc_set_opts()) before calling swr_init(). + * + * @see swr_alloc_set_opts(), swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * + * @return AVERROR error code in case of failure. + */ +int swr_init(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s Swr context, can be NULL + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int log_offset, void *log_ctx); + +/** + * Free the given SwrContext and set the pointer to NULL. + */ +void swr_free(struct SwrContext **s); + +/** + * Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space then the input will be buffered. + * You can avoid this buffering by providing more output space than input. + * Convertion will run directly without copying whenever possible. 
+ * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. + * First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers + * + * @param pts timestamp for the next input sample, INT64_MIN if unknown + * @return the output timestamp for the next output sample + */ +int64_t swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * Activate resampling compensation. + */ +int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. + * + * @param s allocated Swr context, not yet initialized + * @param channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return AVERROR error code in case of failure. + */ +int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Set a customized remix matrix. + * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return AVERROR error code in case of failure. + */ +int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * Drops the specified number of output samples. + */ +int swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + */ +int swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. + * + * @param s swr context + * @param base timebase in which the returned delay will be + * if its set to 1 the returned delay is in seconds + * if its set to 1000 the returned delay is in milli seconds + * if its set to the input sample rate then the returned delay is in input samples + * if its set to the output sample rate then the returned delay is in output samples + * an exact rounding free delay can be found by using LCM(in_sample_rate, out_sample_rate) + * @returns the delay in 1/base units. 
+ */ +int64_t swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Return the LIBSWRESAMPLE_VERSION_INT constant. + */ +unsigned swresample_version(void); + +/** + * Return the swr build-time configuration. + */ +const char *swresample_configuration(void); + +/** + * Return the swr license. + */ +const char *swresample_license(void); + +/** + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/version.h new file mode 100644 index 0000000..464c86d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWR_VERSION_H +#define SWR_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 0 +#define LIBSWRESAMPLE_VERSION_MINOR 17 +#define LIBSWRESAMPLE_VERSION_MICRO 104 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWR_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/swscale.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/swscale.h new file mode 100644 index 0000000..42702b7 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/swscale.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2001-2011 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup lsws + * external API header + */ + +/** + * @defgroup lsws Libswscale + * @{ + */ + +#include + +#include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "version.h" + +/** + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *swscale_configuration(void); + +/** + * Return the libswscale license. + */ +const char *swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominace subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#if FF_API_SWS_CPU_CAPS +/** + * CPU caps are autodetected now, those flags + * are only provided for API compatibility. + */ +#define SWS_CPU_CAPS_MMX 0x80000000 +#define SWS_CPU_CAPS_MMXEXT 0x20000000 +#define SWS_CPU_CAPS_MMX2 0x20000000 +#define SWS_CPU_CAPS_3DNOW 0x40000000 +#define SWS_CPU_CAPS_ALTIVEC 0x10000000 +#define SWS_CPU_CAPS_BFIN 0x01000000 +#define SWS_CPU_CAPS_SSE2 0x02000000 +#endif + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable for sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. + */ +int sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. 
This must be filled and passed to + * sws_init_context(). For filling see AVOptions, options.c and + * sws_setColorspaceDetails(). + */ +struct SwsContext *sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. + */ +void sws_freeContext(struct SwsContext *swsContext); + +#if FF_API_SWS_GETCONTEXT +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using sws_scale(). + * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + * @deprecated Use sws_getCachedContext() instead. + */ +struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); +#endif + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
+ * + * @param c the scaling context previously created with + * sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + +/** + * Allocate and return an uninitialized vector with length coefficients. + */ +SwsVector *sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *sws_getGaussianVec(double variance, double quality); + +/** + * Allocate and return a vector with length coefficients, all + * with the same value c. + */ +SwsVector *sws_getConstVec(double c, int length); + +/** + * Allocate and return a vector with just one coefficient, with + * value 1.0. + */ +SwsVector *sws_getIdentityVec(void); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void sws_normalizeVec(SwsVector *a, double height); +void sws_convVec(SwsVector *a, SwsVector *b); +void sws_addVec(SwsVector *a, SwsVector *b); +void sws_subVec(SwsVector *a, SwsVector *b); +void sws_shiftVec(SwsVector *a, int shift); + +/** + * Allocate and return a clone of the vector a, that is a vector + * with the same coefficients as a. + */ +SwsVector *sws_cloneVec(SwsVector *a); + +/** + * Print with av_log() a textual representation of the vector a + * if log_level <= av_log_level. 
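Putting the scaling API above together, a whole-frame downscale might look like the following sketch (illustrative only, not part of this patch; the 1280x720 -> 640x360 YUV420P conversion and the use of av_image_alloc() for the destination are assumptions of this example, and sws_getContext() is used for brevity even though the header above deprecates it in favor of sws_getCachedContext()).

#include <stdint.h>
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem.h"

/* Scale one 1280x720 YUV420P frame down to 640x360 with bilinear filtering. */
static int scale_frame(const uint8_t *const src[4], const int src_stride[4])
{
    uint8_t *dst[4];
    int dst_stride[4];
    struct SwsContext *sws;

    if (av_image_alloc(dst, dst_stride, 640, 360, AV_PIX_FMT_YUV420P, 16) < 0)
        return -1;

    sws = sws_getContext(1280, 720, AV_PIX_FMT_YUV420P,
                         640,  360, AV_PIX_FMT_YUV420P,
                         SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws) {
        av_freep(&dst[0]);
        return -1;
    }

    /* the whole frame as a single slice starting at row 0 */
    sws_scale(sws, src, src_stride, 0, 720, dst, dst_stride);

    sws_freeContext(sws);
    av_freep(&dst[0]);
    return 0;
}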
+ */ +void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); + +void sws_freeVec(SwsVector *a); + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. + */ +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/version.h b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/version.h new file mode 100644 index 0000000..360ec82 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/include/libswscale/version.h @@ -0,0 +1,59 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWSCALE_VERSION_MAJOR 2 +#define LIBSWSCALE_VERSION_MINOR 5 +#define LIBSWSCALE_VERSION_MICRO 101 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_SWS_GETCONTEXT +#define FF_API_SWS_GETCONTEXT (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_CPU_CAPS +#define FF_API_SWS_CPU_CAPS (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_FORMAT_NAME +#define FF_API_SWS_FORMAT_NAME (LIBSWSCALE_VERSION_MAJOR < 3) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavcodec.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavcodec.a new file mode 100644 index 0000000..73dbb64 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavcodec.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavdevice.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavdevice.a new file mode 100644 index 0000000..3ba8719 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavdevice.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavfilter.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavfilter.a new file mode 100644 index 0000000..cb8c7f6 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavfilter.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavformat.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavformat.a new file mode 100644 index 0000000..5edeb67 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavformat.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavresample.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavresample.a new file mode 100644 index 0000000..ad0a376 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavutil.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavutil.a new file mode 100644 index 0000000..c2f2175 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libavutil.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswresample.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswresample.a new file mode 100644 index 0000000..6b8048d Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswscale.a b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswscale.a new file mode 100644 index 0000000..04319ae Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/armv7s/lib/libswscale.a differ diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavcodec.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavcodec.pc new file 
mode 100644 index 0000000..1a8d0c8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavcodec.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavcodec +Description: FFmpeg codec library +Version: 55.39.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavcodec -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavdevice.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavdevice.pc new file mode 100644 index 0000000..3d82bf0 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavdevice.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavdevice +Description: FFmpeg device handling library +Version: 55.5.100 +Requires: libavfilter = 3.90.100, libavformat = 55.19.104 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavdevice -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavfilter.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavfilter.pc new file mode 100644 index 0000000..46d8a92 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavfilter.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavfilter +Description: FFmpeg audio/video filtering library +Version: 3.90.100 +Requires: libswresample = 0.17.104, libswscale = 2.5.101, libavresample = 1.1.0, libavformat = 55.19.104, libavcodec = 55.39.101, libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavfilter -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavformat.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavformat.pc new file mode 100644 index 0000000..b3bd0ee --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavformat.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavformat +Description: FFmpeg container format library +Version: 55.19.104 +Requires: libavcodec = 55.39.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavformat -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavresample.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavresample.pc new file mode 100644 index 0000000..ac8d577 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavresample +Description: Libav audio resampling library +Version: 1.1.0 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavresample -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavutil.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavutil.pc new file mode 100644 index 0000000..c5e8e27 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libavutil.pc @@ -0,0 +1,14 
@@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavutil +Description: FFmpeg utility library +Version: 52.48.101 +Requires: +Requires.private: +Conflicts: +Libs: -L${libdir} -lavutil -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswresample.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswresample.pc new file mode 100644 index 0000000..e8363de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswresample +Description: FFmpeg audio resampling library +Version: 0.17.104 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswresample -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswscale.pc b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswscale.pc new file mode 100644 index 0000000..c0ee25b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/armv7s/lib/pkgconfig/libswscale.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/armv7s +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswscale +Description: FFmpeg image rescaling library +Version: 2.5.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswscale -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avcodec.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avcodec.h new file mode 100644 index 0000000..fe64b38 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avcodec.h @@ -0,0 +1,5029 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include +#include "libavutil/samplefmt.h" +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/buffer.h" +#include "libavutil/cpu.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc Encoding/Decoding Library + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + * + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of a existing codec ID changes (that would break ABI), + * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec. + * This ensures that 2 forks can independently add AVCodecIDs without producing conflicts. + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. 
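Since the AVCodecID enum that follows is what ties container-level identifiers to concrete decoder implementations, the usual entry point is avcodec_find_decoder(). A short, hedged example of resolving the H.264 ID (error handling trimmed; nothing here is taken from this project's sources):

#include <stdio.h>
#include "libavcodec/avcodec.h"

int main(void)
{
    avcodec_register_all();  /* one-time global registration of all codecs */

    AVCodec *decoder = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!decoder) {
        fprintf(stderr, "no %s decoder compiled in\n",
                avcodec_get_name(AV_CODEC_ID_H264));
        return 1;
    }
    printf("using decoder '%s' (%s)\n", decoder->name, decoder->long_name);
    return 0;
}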
+ */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + AV_CODEC_ID_MPEG2VIDEO_XVMC, + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, + AV_CODEC_ID_IFF_BYTERUN1, + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + 
AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130_DEPRECATED, + AV_CODEC_ID_G2M_DEPRECATED, + AV_CODEC_ID_WEBP_DEPRECATED, + + AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'), + AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + AV_CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + AV_CODEC_ID_EXR = MKBETAG('0','E','X','R'), + AV_CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + AV_CODEC_ID_012V = MKBETAG('0','1','2','V'), + AV_CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + AV_CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + AV_CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'), + AV_CODEC_ID_V308 = MKBETAG('V','3','0','8'), + AV_CODEC_ID_V408 = MKBETAG('V','4','0','8'), + AV_CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + AV_CODEC_ID_SANM = MKBETAG('S','A','N','M'), + AV_CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + AV_CODEC_ID_AVRN = MKBETAG('A','V','R','n'), + AV_CODEC_ID_CPIA = MKBETAG('C','P','I','A'), + AV_CODEC_ID_XFACE = MKBETAG('X','F','A','C'), + AV_CODEC_ID_SGIRLE = MKBETAG('S','G','I','R'), + AV_CODEC_ID_MVC1 = MKBETAG('M','V','C','1'), + AV_CODEC_ID_MVC2 = MKBETAG('M','V','C','2'), + AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'), + AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'), + AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'), + AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'), +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, + AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, + AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'), + AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'), + AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16), + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + 
AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '), + AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '), + AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '), + AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '), + AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'), + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE + AV_CODEC_ID_VOXWARE, +#endif + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS_DEPRECATED, + AV_CODEC_ID_COMFORT_NOISE, + AV_CODEC_ID_TAK_DEPRECATED, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + AV_CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + AV_CODEC_ID_OPUS = MKBETAG('O','P','U','S'), + AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'), + AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'), + AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'), + + /* subtitle codecs */ + AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. 
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + AV_CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + AV_CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + AV_CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + AV_CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + AV_CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'), + AV_CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + AV_CODEC_ID_SUBRIP = MKBETAG('S','R','i','p'), + AV_CODEC_ID_WEBVTT = MKBETAG('W','V','T','T'), + AV_CODEC_ID_MPL2 = MKBETAG('M','P','L','2'), + AV_CODEC_ID_VPLAYER = MKBETAG('V','P','l','r'), + AV_CODEC_ID_PJS = MKBETAG('P','h','J','S'), + AV_CODEC_ID_ASS = MKBETAG('A','S','S',' '), ///< ASS as defined in Matroska + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + AV_CODEC_ID_TTF = 0x18000, + AV_CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + AV_CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + AV_CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'), + AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'), + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#if FF_API_CODEC_ID +#include "old_codec_ids.h" +#endif +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. + * @see avcodec_get_descriptor() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. + * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. + * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. 
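The AVCodecDescriptor table and AV_CODEC_PROP_* bits defined above can be queried without instantiating a codec, which is useful for cheap capability checks. A small sketch (the helper name describe() is illustrative):

#include <stdio.h>
#include "libavcodec/avcodec.h"

static void describe(enum AVCodecID id)
{
    const AVCodecDescriptor *desc = avcodec_descriptor_get(id);
    if (!desc)
        return;
    printf("%s:%s%s%s\n", desc->name,
           (desc->props & AV_CODEC_PROP_INTRA_ONLY) ? " intra-only" : "",
           (desc->props & AV_CODEC_PROP_LOSSY)      ? " lossy"      : "",
           (desc->props & AV_CODEC_PROP_LOSSLESS)   ? " lossless"   : "");
}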
+ * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bits at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 16 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define FF_MIN_BUFFER_SIZE 16384 + + +/** + * @ingroup lavc_encoding + * motion estimation type. + */ +enum Motion_Est_ID { + ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed + ME_FULL, + ME_LOG, + ME_PHODS, + ME_EPZS, ///< enhanced predictive zonal search + ME_X1, ///< reserved for experiments + ME_HEX, ///< hexagon based search + ME_UMH, ///< uneven multi-hexagon search + ME_TESA, ///< transformed exhaustive search algorithm + ME_ITER=50, ///< iterative search +}; + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). */ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVColorPrimaries{ + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_BT470M = 4, + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, + AVCOL_PRI_NB , ///< Not part of ABI +}; + +enum AVColorTransferCharacteristic{ + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_NB , ///< Not part of ABI +}; + +/** + * X X 3 4 X X are luma samples, + * 1 2 1-6 are possible chroma positions + * X X 5 6 X 0 is undefined/unknown position + */ +enum AVChromaLocation{ + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default + AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263 + AVCHROMA_LOC_TOPLEFT = 3, ///< DV + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB , ///< Not part of ABI +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +#define FF_MAX_B_FRAMES 16 + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). 
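Because the FF_INPUT_BUFFER_PADDING_SIZE requirement above is easy to miss, here is a minimal sketch of wrapping a raw bitstream buffer in an AVPacket with the mandatory zeroed padding; make_padded_packet() and its parameters are illustrative names, and the caller is assumed to free pkt->data with av_free() when done:

#include <errno.h>
#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

static int make_padded_packet(AVPacket *pkt, const uint8_t *bitstream, int size)
{
    uint8_t *buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return AVERROR(ENOMEM);
    memcpy(buf, bitstream, size);
    memset(buf + size, 0, FF_INPUT_BUFFER_PADDING_SIZE); /* keep the overread area zeroed */

    av_init_packet(pkt);
    pkt->data = buf;   /* no buf/destruct attached: ownership stays with the caller */
    pkt->size = size;
    return 0;
}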
+ */ +#define CODEC_FLAG_UNALIGNED 0x0001 +#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. +#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. +#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. +/** + * The parent program guarantees that the input for B-frames containing + * streams is not written to for at least s->max_b_frames+1 frames, if + * this is not set the input will be copied. + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. +#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. +#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. +#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. +#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. +#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random + location instead of only at frame boundaries. */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization. +#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT. +#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay. +#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe. +#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT). +/* Fx : Flag for h263+ extra options */ +#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction +#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter +#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation +#define CODEC_FLAG_CLOSED_GOP 0x80000000 +#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks. +#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding. +#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata. +#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!! +#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS. + +#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries. +#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 0x0002 +#define CODEC_CAP_TRUNCATED 0x0008 +/* Codec can export data for HW decoding (XvMC). */ +#define CODEC_CAP_HWACCEL 0x0010 +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. 
The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY 0x0020 +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME 0x0040 +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). + */ +#define CODEC_CAP_HWACCEL_VDPAU 0x0080 +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carring such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES 0x0100 +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL 0x0200 +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF 0x0400 + +/** + * Codec is able to deal with negative linesizes + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 + +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS 0x1000 +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS 0x2000 +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE 0x4000 +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS 0x8000 +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000 +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define CODEC_CAP_LOSSLESS 0x80000000 + +//The following defines may change, don't expect compatibility if you use them. 
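The CODEC_CAP_DELAY semantics above are the part of this header that most often trips up callers: a delayed decoder must be drained with empty packets at end of stream. A minimal drain loop, assuming decoding goes through avcodec_decode_video2() (the current entry point in this libavcodec version); names are illustrative:

#include "libavcodec/avcodec.h"
#include "libavutil/frame.h"

static void drain_decoder(AVCodecContext *ctx, AVFrame *frame)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;   /* data=NULL, size=0 signals end of stream ... */
    pkt.size = 0;      /* ... for codecs advertising CODEC_CAP_DELAY   */

    int got_frame;
    do {
        got_frame = 0;
        if (avcodec_decode_video2(ctx, frame, &got_frame, &pkt) < 0)
            break;
        if (got_frame) {
            /* consume the delayed frame here */
        }
    } while (got_frame);
}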
+#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan{ + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +}AVPanScan; + +#define FF_QSCALE_TYPE_MPEG1 0 +#define FF_QSCALE_TYPE_MPEG2 1 +#define FF_QSCALE_TYPE_H264 2 +#define FF_QSCALE_TYPE_VP56 3 + +#if FF_API_GET_BUFFER +#define FF_BUFFER_TYPE_INTERNAL 1 +#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user) +#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared. +#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything. + +#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore). +#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer. +#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content. +#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update). +#endif + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. + * @{ + */ +enum AVPacketSideDataType { + AV_PKT_DATA_PALETTE, + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. 
+ * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES=70, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. + */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, +}; + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf or destruct (deprecated) + * fields. If either is set, the packet data is dynamically allocated and is + * valid indefinitely until av_free_packet() is called (which in turn calls + * av_buffer_unref()/the destruct callback to free the data). If neither is set, + * the packet data is typically backed by some static buffer somewhere and is + * only valid for a limited time (e.g. until the next read call when demuxing). + * + * The side data is always allocated with av_malloc() and is freed in + * av_free_packet(). + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. 
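Every entry in the side-data enum above is fetched the same way, by type, with av_packet_get_side_data(). A hedged sketch of reading the skip-samples record; the AV_RL32 read matches the little-endian layout documented above, and the helper name is illustrative:

#include "libavcodec/avcodec.h"
#include "libavutil/intreadwrite.h"

/* Samples the container asks to be dropped from the start of this packet,
 * or 0 if no AV_PKT_DATA_SKIP_SAMPLES side data is attached. */
static unsigned skip_samples_at_start(AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);
    if (!sd || size < 10)   /* u32le + u32le + u8 + u8 */
        return 0;
    return AV_RL32(sd);     /* u32le: samples to skip from the start */
}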
+ * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + struct { + uint8_t *data; + int size; + enum AVPacketSideDataType type; + } *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. + * Equals next_pts - this_pts in presentation order. + */ + int duration; +#if FF_API_DESTRUCT_PACKET + attribute_deprecated + void (*destruct)(struct AVPacket *); + attribute_deprecated + void *priv; +#endif + int64_t pos; ///< byte position in stream, -1 if unknown + + /** + * Time difference in AVStream->time_base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current packet. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. + * sizeof(AVCodecContext) must not be used outside libav*. 
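Because sizeof(AVCodecContext) is explicitly off-limits outside libav* (see the note directly above), the context must be allocated and released through libavcodec itself. A minimal open/close sketch for this library version; the H.264 choice and function names are illustrative and error handling is abbreviated:

#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

static AVCodecContext *open_h264_decoder(void)
{
    AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec)
        return NULL;

    AVCodecContext *ctx = avcodec_alloc_context3(codec); /* never malloc(sizeof(...)) */
    if (!ctx)
        return NULL;

    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_freep(&ctx);
        return NULL;
    }
    return ctx;
}

static void close_decoder(AVCodecContext *ctx)
{
    if (ctx) {
        avcodec_close(ctx);  /* releases codec-internal state */
        av_free(ctx);        /* then free the context allocation itself */
    }
}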
+ */ +typedef struct AVCodecContext { + /** + * information on struct for av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; + char codec_name[32]; + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. + * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + + /** + * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * - encoding: unused + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int stream_codec_tag; + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. + */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream. + */ + int bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags; + + /** + * CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * mjpeg: Huffman tables + * rv10: additional flags + * mpeg4: global headers (they can be in the bitstream or here) + * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. 
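The extradata rules above (user-allocated when decoding, padded by FF_INPUT_BUFFER_PADDING_SIZE) apply whenever out-of-band headers such as H.264 SPS/PPS are supplied before avcodec_open2(). A hedged sketch with illustrative names:

#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavutil/mem.h"

/* Attach out-of-band codec headers to a context prior to avcodec_open2(). */
static int set_extradata(AVCodecContext *ctx, const uint8_t *headers, int size)
{
    ctx->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->extradata)
        return -1;
    memcpy(ctx->extradata, headers, size);  /* trailing padding stays zeroed */
    ctx->extradata_size = size;
    return 0;
}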
+ */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * - encoding: MUST be set by user. + * - decoding: Set by libavcodec. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this is the number of "priming" samples added to the + * beginning of the stream. The decoded output will be delayed by this + * many samples relative to the input to the encoder. Note that this + * field is purely informational and does not directly affect the pts + * output by the encoder, which should always be based on the actual + * presentation time, including any delay. + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required. + */ + int coded_width, coded_height; + +#define FF_ASPECT_EXTENDED 15 + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec if known + */ + enum AVPixelFormat pix_fmt; + + /** + * Motion estimation algorithm used for video coding. + * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex), + * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific] + * - encoding: MUST be set by user. + * - decoding: unused + */ + int me_method; + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. Not + * all codecs can do that. 
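The interaction of time_base and ticks_per_frame above is easy to get backwards. For fixed-fps content one frame lasts time_base * ticks_per_frame seconds, so the effective frame rate can be recovered as in this small sketch (a simplification that only holds for fixed-fps streams; ctx is any opened decoder context):

#include "libavcodec/avcodec.h"
#include "libavutil/rational.h"

/* Example: H.264 with time_base = 1/60 and ticks_per_frame = 2 -> 30 fps. */
static double effective_fps(const AVCodecContext *ctx)
{
    int ticks = ctx->ticks_per_frame > 0 ? ctx->ticks_per_frame : 1;
    double frame_duration = av_q2d(ctx->time_base) * ticks;
    return frame_duration > 0 ? 1.0 / frame_duration : 0.0;
}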
You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. + * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. + * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + + /** obsolete FIXME remove */ + int rc_strategy; +#define FF_RC_STRATEGY_XVID 1 + + int b_frame_strategy; + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + + /** + * 0-> h263 quant 1-> mpeg quant + * - encoding: Set by user. + * - decoding: unused + */ + int mpeg_quant; + + /** + * qscale factor between P and I-frames + * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. 
+ * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + /** + * prediction method (needed for huffyuv) + * - encoding: Set by user. + * - decoding: unused + */ + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. + * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + + /** + * prepass for motion estimation + * - encoding: Set by user. + * - decoding: unused + */ + int pre_me; + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + + /** + * DTG active format information (additional aspect ratio + * information only used in DVB MPEG-2 transport streams) + * 0 if not set. + * + * - encoding: unused + * - decoding: Set by decoder. + */ + int dtg_active_format; +#define FF_DTG_AFD_SAME 8 +#define FF_DTG_AFD_4_3 9 +#define FF_DTG_AFD_16_9 10 +#define FF_DTG_AFD_14_9 11 +#define FF_DTG_AFD_4_3_SP_14_9 13 +#define FF_DTG_AFD_16_9_SP_14_9 14 +#define FF_DTG_AFD_SP_4_3 15 + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_range; + + /** + * intra quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int intra_quant_bias; +#define FF_DEFAULT_QUANT_BIAS 999999 + + /** + * inter quantizer bias + * - encoding: Set by user. + * - decoding: unused + */ + int inter_quant_bias; + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. 
+ */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + + /** + * XVideo Motion Acceleration + * - encoding: forbidden + * - decoding: set by decoder + */ + int xvmc_acceleration; + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *inter_matrix; + + /** + * scene change detection threshold + * 0 is default, larger means fewer detected scene changes. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_threshold; + + /** + * noise reduction strength + * - encoding: Set by user. + * - decoding: unused + */ + int noise_reduction; + + /** + * Motion estimation threshold below which no motion estimation is + * performed, but instead the user specified motion vectors are used. + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_threshold; + + /** + * Macroblock threshold below which the user specified macroblock types will be used. + * - encoding: Set by user. + * - decoding: unused + */ + int mb_threshold; + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: unused + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + + /** + * Border processing masking, raises the quantizer for mbs on the borders + * of the picture. + * - encoding: Set by user. + * - decoding: unused + */ + float border_masking; + + /** + * minimum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmax; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int me_penalty_compensation; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + + /** + * + * - encoding: Set by user. + * - decoding: unused + */ + int brd_scale; + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. + */ + int refs; + + /** + * chroma qp offset from luma + * - encoding: Set by user. + * - decoding: unused + */ + int chromaoffset; + + /** + * Multiplied by qscale for each frame and added to scene_change_score. + * - encoding: Set by user. + * - decoding: unused + */ + int scenechange_factor; + + /** + * + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. 
+ * - decoding: unused + */ + int mv0_threshold; + + /** + * Adjust sensitivity of b_frame_strategy 1. + * - encoding: Set by user. + * - decoding: unused + */ + int b_sensitivity; + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. + * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + +#if FF_API_REQUEST_CHANNELS + /** + * Decoder should decode to this many channels if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + * @deprecated Deprecated in favor of request_channel_layout. + */ + attribute_deprecated int request_channels; +#endif + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. 
+ */ + enum AVSampleFormat request_sample_fmt; + +#if FF_API_GET_BUFFER + /** + * Called at the beginning of each frame to get a buffer for it. + * + * The function will set AVFrame.data[], AVFrame.linesize[]. + * AVFrame.extended_data[] must also be set, but it should be the same as + * AVFrame.data[] except for planar audio with more channels than can fit + * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as + * many data pointers as it can hold. + * + * if CODEC_CAP_DR1 is not set then get_buffer() must call + * avcodec_default_get_buffer() instead of providing buffers allocated by + * some other means. + * + * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't + * need it. avcodec_default_get_buffer() aligns the output buffer properly, + * but if get_buffer() is overridden then alignment considerations should + * be taken into account. + * + * @see avcodec_default_get_buffer() + * + * Video: + * + * If pic.reference is set then the frame will be read later by libavcodec. + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * it may be called from a different thread, but not from more than one at + * once. Does not need to be reentrant. + * + * @see release_buffer(), reget_buffer() + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * Decoders cannot use the buffer after returning from + * avcodec_decode_audio4(), so they will not call release_buffer(), as it + * is assumed to be released immediately upon return. In some rare cases, + * a decoder may need to call get_buffer() more than once in a single + * call to avcodec_decode_audio4(). In that case, when get_buffer() is + * called again after it has already been called once, the previously + * acquired buffer is assumed to be released at that time and may not be + * reused by the decoder. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated use get_buffer2() + */ + attribute_deprecated + int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called to release buffers which were allocated with get_buffer. + * A released buffer can be reused in get_buffer(). + * pic.data[*] must be set to NULL. + * May be called from a different thread if frame multithreading is used, + * but not by more than one thread at once, so does not need to be reentrant. + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + * + * @deprecated custom freeing callbacks should be set from get_buffer2() + */ + attribute_deprecated + void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); + + /** + * Called at the beginning of a frame to get cr buffer for it. + * Buffer type (size, hints) must be the same. 
libavcodec won't check it. + * libavcodec will pass previous buffer in pic, function should return + * same buffer or new buffer with old frame "painted" into it. + * If pic.data[0] == NULL must behave like get_buffer(). + * if CODEC_CAP_DR1 is not set then reget_buffer() must call + * avcodec_default_reget_buffer() instead of providing buffers allocated by + * some other means. + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + attribute_deprecated + int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic); +#endif + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with av_malloc() and will be freed in + * av_frame_unref(). + * * otherwise exended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(), + * and av_buffer_ref(). + * - extended_buf and nb_extended_buf must be allocated with av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * av_frame_unref(). + * + * If CODEC_CAP_DR1 is not set then get_buffer2() must call + * avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. + * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an + * edge of the size returned by avcodec_get_edge_width() on all sides. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. 
+ * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + + /** + * ratecontrol qmin qmax limiting method + * 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax. + * - encoding: Set by user. + * - decoding: unused + */ + float rc_qsquish; + + float rc_qmod_amp; + int rc_qmod_freq; + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + + /** + * rate control equation + * - encoding: Set by user + * - decoding: unused + */ + const char *rc_eq; + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int rc_min_rate; + + float rc_buffer_aggressivity; + + /** + * initial complexity for pass1 ratecontrol + * - encoding: Set by user. + * - decoding: unused + */ + float rc_initial_cplx; + + /** + * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. 
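The refcounted_frames field documented above shifts ownership of decoded frames to the caller. A minimal sketch of that usage, assuming an FFmpeg build matching this header; the function name decode_one is illustrative and not part of the patch:

#include <libavcodec/avcodec.h>

/* Sketch: decode one packet with ctx->refcounted_frames = 1 (set before
 * avcodec_open2()), so the returned frame stays valid until we unref it. */
static int decode_one(AVCodecContext *ctx, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();
    int got_frame = 0, ret;

    if (!frame)
        return AVERROR(ENOMEM);

    ret = avcodec_decode_video2(ctx, frame, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* ... read frame->data / frame->linesize here ... */
        av_frame_unref(frame);      /* we own the reference, so release it */
    }

    avcodec_free_frame(&frame);
    return ret;
}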
+ * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 +#define FF_CODER_TYPE_DEFLATE 4 + /** + * coder type + * - encoding: Set by user. + * - decoding: unused + */ + int coder_type; + + /** + * context model + * - encoding: Set by user. + * - decoding: unused + */ + int context_model; + + /** + * minimum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmin; + + /** + * maximum Lagrange multipler + * - encoding: Set by user. + * - decoding: unused + */ + int lmax; + + /** + * frame skip threshold + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_threshold; + + /** + * frame skip factor + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_factor; + + /** + * frame skip exponent + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_exp; + + /** + * frame skip comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int frame_skip_cmp; + + /** + * trellis RD quantization + * - encoding: Set by user. + * - decoding: unused + */ + int trellis; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int min_prediction_order; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int max_prediction_order; + + /** + * GOP timecode frame start number + * - encoding: Set by user, in non drop frame format + * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset) + */ + int64_t timecode_frame_start; + + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. */ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); + + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ + + /* statistics, used for 2-pass encoding */ + int mv_bits; + int header_bits; + int i_tex_bits; + int p_tex_bits; + int i_count; + int p_count; + int skip_count; + int misc_bits; + + /** + * number of bits used for the previously encoded frame + * - encoding: Set by libavcodec. + * - decoding: unused + */ + int frame_bits; + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. + * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. + * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#define FF_BUG_OLD_MSMPEG4 2 +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. 
+#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 + + /** + * strictly follow the standard (MPEG4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. +#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#define FF_DEBUG_MV 32 +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#define FF_DEBUG_PTS 0x00000200 +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames + + /** + * Error recognition; may misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; +#define AV_EF_CRCCHECK (1<<0) ///< verify embedded CRCs +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliancies as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * @deprecated in favor of pkt_pts + * - encoding: unused + * - decoding: Set by user. 
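The err_recognition and debug fields above are plain bitmasks built from the AV_EF_* and FF_DEBUG_* defines. A hedged sketch of how a client might tighten error handling on a decoder context (enable_strict_decoding is an illustrative name):

#include <libavcodec/avcodec.h>

/* Sketch: fail fast on damaged bitstreams instead of concealing errors,
 * and turn on some diagnostic logging. */
static void enable_strict_decoding(AVCodecContext *ctx)
{
    ctx->err_recognition   = AV_EF_CRCCHECK | AV_EF_BITSTREAM | AV_EF_EXPLODE;
    ctx->error_concealment = 0;                      /* no MV guessing / deblocking */
    ctx->debug             = FF_DEBUG_PICT_INFO | FF_DEBUG_ER;
}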
+ */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. + * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#define FF_IDCT_SH4 9 +#define FF_IDCT_SIMPLEARM 10 +#define FF_IDCT_IPP 13 +#define FF_IDCT_XVIDMMX 14 +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#define FF_IDCT_SIMPLEVIS 18 +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#define FF_IDCT_SIMPLEALPHA 23 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + AVFrame *coded_frame; + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. 
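thread_count and thread_type above control multithreaded decoding, and the comment notes that FF_THREAD_FRAME adds one frame of delay per thread. An illustrative sketch of setting them before avcodec_open2(); configure_threads is not part of the header:

#include <libavcodec/avcodec.h>

/* Sketch: auto-size the thread pool, and avoid frame threading when the
 * extra per-thread frame of latency matters. */
static void configure_threads(AVCodecContext *ctx, int low_latency)
{
    ctx->thread_count = 0;                             /* 0 = auto-detect */
    ctx->thread_type  = low_latency ? FF_THREAD_SLICE
                                    : FF_THREAD_FRAME | FF_THREAD_SLICE;
}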
+ */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. + * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * thread opaque + * Can be used by execute() to store some per AVCodecContext stuff. + * - encoding: set by execute() + * - decoding: set by execute() + */ + void *thread_opaque; + + /** + * noise vs. sse weight for the nsse comparsion function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 +#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. 
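The AVDiscard skip_* fields above let a decoder drop work per frame class. A hedged sketch of a speed-over-quality preset (prefer_speed is illustrative; the exact AVDiscard levels are a policy choice):

#include <libavcodec/avcodec.h>

/* Sketch: skip the loop filter on frames nothing references, keep
 * everything else at its default. */
static void prefer_speed(AVCodecContext *ctx)
{
    ctx->skip_loop_filter = AVDISCARD_NONREF;
    ctx->skip_idct        = AVDISCARD_DEFAULT;
    ctx->skip_frame       = AVDISCARD_DEFAULT;   /* still decode every frame */
}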
+ * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + + /** + * Simulates errors in the bitstream to test error concealment. + * - encoding: Set by user. + * - decoding: unused + */ + int error_rate; + + /** + * Current packet as passed into the decoder, to avoid having + * to pass the packet into every function. Currently only valid + * inside lavc and get/release_buffer callbacks. + * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts + * - encoding: unused + */ + AVPacket *pkt; + + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + */ + uint64_t vbv_delay; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_pkt_timebase(avctx) + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_codec_descriptor(avctx) + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. + * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). 
+ * Code outside libavcodec should access this field using AVOptions + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; +} AVCodecContext; + +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +int av_codec_get_lowres(const AVCodecContext *avctx); +void av_codec_set_lowres(AVCodecContext *avctx, int val); + +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. + * see CODEC_CAP_* + */ + int capabilities; + const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} + const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 + const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 + const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 + const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 +#if FF_API_LOWRES + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres() +#endif + const AVClass *priv_class; ///< AVClass for the private context + const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int priv_data_size; + struct AVCodec *next; + /** + * @name Frame-level threading support functions + * @{ + */ + /** + * If defined, called on thread contexts when they are created. + * If the codec allocates writable tables in init(), re-allocate them here. + * priv_data will be set to a copy of the original. + */ + int (*init_thread_copy)(AVCodecContext *); + /** + * Copy necessary context variables from a previous thread context to the current one. 
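The public AVCodec fields above describe a codec through sentinel-terminated arrays (pix_fmts ends at -1, sample rates at 0). A small sketch that walks them; print_codec_info is an illustrative helper, not part of the header:

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Sketch: dump a codec's name, delay capability and pixel formats. */
static void print_codec_info(const AVCodec *codec)
{
    printf("%s (%s)\n", codec->name, codec->long_name ? codec->long_name : "");
    if (codec->capabilities & CODEC_CAP_DELAY)
        printf("  delayed: flush with empty packets at EOF\n");
    if (codec->pix_fmts) {
        const enum AVPixelFormat *p;
        for (p = codec->pix_fmts; *p != AV_PIX_FMT_NONE; p++)
            printf("  pixel format %d\n", (int)*p);
    }
}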
+ * If not defined, the next thread will start automatically; otherwise, the codec + * must call ff_thread_finish_setup(). + * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from avcodec_register(). + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. + * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); +} AVCodec; + +int av_codec_get_max_lowres(const AVCodec *codec); + +/** + * AVHWAccel. + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see FF_HWACCEL_CODEC_CAP_* + */ + int capabilities; + + struct AVHWAccel *next; + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. + * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. 
+ * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of HW accelerator private data. + * + * Private data is allocated with av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int priv_data_size; +} AVHWAccel; + +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + */ +typedef struct AVPicture { + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. + */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + + /** + * data+linesize for the bitmap of this subtitle. + * can be set for text/ass as well once they where rendered + */ + AVPicture pict; + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(const AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. 
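avcodec_register_all(), av_codec_next() and the version helpers above are enough to enumerate what a build supports. An illustrative sketch (list_decoders is not part of the header; av_codec_is_decoder() is assumed to be declared elsewhere in this same header):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Sketch: register everything once, then walk the registered codecs. */
static void list_decoders(void)
{
    const AVCodec *c = NULL;

    avcodec_register_all();            /* must precede other lavc calls */
    printf("libavcodec %u, config: %s\n",
           avcodec_version(), avcodec_configuration());

    while ((c = av_codec_next(c)))
        if (av_codec_is_decoder(c))
            printf("  %s\n", c->name);
}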
+ * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + + +#if FF_API_ALLOC_CONTEXT +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by simply calling av_free(). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + * + * @deprecated use avcodec_alloc_context3() + */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context(void); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +AVCodecContext *avcodec_alloc_context2(enum AVMediaType); + +/** + * Set the fields of the given AVCodecContext to default values. + * + * @param s The AVCodecContext of which the fields should be set to default values. + * @deprecated use avcodec_get_context_defaults3 + */ +attribute_deprecated +void avcodec_get_context_defaults(AVCodecContext *s); + +/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! + * we WILL change its arguments and name a few times! */ +attribute_deprecated +void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); +#endif + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct can be deallocated by calling avcodec_close() on it followed + * by av_free(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. + * @see avcodec_get_context_defaults + */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Set the fields of the given AVCodecContext to default values corresponding + * to the given codec (defaults may be codec-dependent). + * + * Do not call this function if a non-NULL codec has been passed + * to avcodec_alloc_context3() that allocated this AVCodecContext. + * If codec is non-NULL, it is illegal to call avcodec_open2() with a + * different codec on this AVCodecContext. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. 
+ * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + */ +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using avcodec_free_frame(). + * + * @return An AVFrame filled with default values or NULL on failure. + * @see avcodec_get_frame_defaults + */ +AVFrame *avcodec_alloc_frame(void); + +/** + * Set the fields of the given AVFrame to default values. + * + * @param frame The AVFrame of which the fields should be set to default values. + */ +void avcodec_get_frame_defaults(AVFrame *frame); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. + * + * @param frame frame to be freed. The pointer will be set to NULL. + * + * @warning this function does NOT free the data buffers themselves + * (it does not know how, since they might have been allocated with + * a custom get_buffer()). + */ +void avcodec_free_frame(AVFrame **frame); + +#if FF_API_AVCODEC_OPEN +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated. + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open(context, codec) < 0) + * exit(1); + * @endcode + * + * @param avctx The context which will be set up to use the given codec. + * @param codec The codec to use within the context. + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close + * + * @deprecated use avcodec_open2 + */ +attribute_deprecated +int avcodec_open(AVCodecContext *avctx, AVCodec *codec); +#endif + +/** + * Initialize the AVCodecContext to use the given AVCodec. Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * avcodec_get_context_defaults3() for this context, then this + * parameter MUST be either NULL or equal to the previously passed + * codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. 
+ * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() / + * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will + * do nothing. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +#if FF_API_DESTRUCT_PACKET +/** + * Default packet destructor. + * @deprecated use the AVBuffer API instead + */ +attribute_deprecated +void av_destruct_packet(AVPacket *pkt); +#endif + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. + * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + */ +int av_dup_packet(AVPacket *pkt); + +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet(AVPacket *dst, AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet_side_data(AVPacket *dst, AVPacket *src); + +/** + * Free a packet. + * + * @param pkt packet to free + */ +void av_free_packet(AVPacket *pkt); + +/** + * Allocate new information of a packet. 
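av_new_packet() above allocates a payload plus the required input padding and initializes the other packet fields. A hedged sketch of wrapping a raw buffer (make_packet is an illustrative name):

#include <string.h>
#include <libavcodec/avcodec.h>

/* Sketch: copy a depacketized buffer into a freshly allocated AVPacket.
 * av_new_packet() zero-fills the FF_INPUT_BUFFER_PADDING_SIZE tail. */
static int make_packet(AVPacket *pkt, const uint8_t *data, int size)
{
    int ret = av_new_packet(pkt, size);
    if (ret < 0)
        return ret;
    memcpy(pkt->data, data, size);
    return 0;
}
/* Release later with av_free_packet(pkt). */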
+ * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +int av_packet_merge_side_data(AVPacket *pkt); + +int av_packet_split_side_data(AVPacket *pkt); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + * + */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder_by_name(const char *name); + +#if FF_API_GET_BUFFER +attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); +attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); +#endif + +/** + * The default callback for AVCodecContext.get_buffer2(). 
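av_packet_ref(), av_packet_unref() and av_packet_move_ref() above are the reference-counted way to queue packets. A short sketch with illustrative names (queue_hold, queue_release):

#include <libavcodec/avcodec.h>

/* Sketch: keep a packet alive in a queue slot, then release it. */
static int queue_hold(AVPacket *slot, AVPacket *incoming)
{
    return av_packet_ref(slot, incoming);  /* bumps the refcount or copies */
}

static void queue_release(AVPacket *slot)
{
    av_packet_unref(slot);                 /* buffer freed with the last ref */
}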
It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * CODEC_CAP_DR1 set. + */ +int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +/** + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. + * + * @return Required padding in pixels. + */ +unsigned avcodec_get_edge_width(void); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with CODEC_CAP_DR1 has been opened. + * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased + * according to avcodec_get_edge_width() before. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +#if FF_API_OLD_DECODE_AUDIO +/** + * Wrapper function which calls avcodec_decode_audio4. + * + * @deprecated Use avcodec_decode_audio4 instead. + * + * Decode the audio frame of size avpkt->size from avpkt->data into samples. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. In this case, + * avcodec_decode_audio3 has to be called again with an AVPacket that contains + * the remaining data in order to decode the second frame etc. + * If no frame + * could be outputted, frame_size_ptr is zero. Otherwise, it is the + * decompressed frame size in bytes. + * + * @warning You must set frame_size_ptr to the allocated size of the + * output buffer before calling avcodec_decode_audio3(). + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @warning You must not provide a custom get_buffer() when using + * avcodec_decode_audio3(). 
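+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: a
+ * pass-through AVCodecContext.get_buffer2() override that delegates to
+ * avcodec_default_get_buffer2(), as permitted above. "ctx" is assumed to be a
+ * not-yet-opened AVCodecContext.)
+ * @code
+ * static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     // custom bookkeeping could be inserted here
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ *
+ * ctx->get_buffer2 = my_get_buffer2;   // set before avcodec_open2()
+ * @endcode
+ *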
Doing so will override it with + * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, + * which does allow the application to provide a custom get_buffer(). + * + * @note You might have to align the input buffer avpkt->data and output buffer + * samples. The alignment requirements depend on the CPU: On some CPUs it isn't + * necessary at all, on others it won't work at all if not aligned and on others + * it will work but it will have an impact on performance. + * + * In practice, avpkt->data should have 4 byte alignment at minimum and + * samples should be 16 byte aligned unless the CPU doesn't need it + * (AltiVec and SSE do). + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] samples the output buffer, sample type in avctx->sample_fmt + * If the sample format is planar, each channel plane will + * be the same size, with no padding between channels. + * @param[in,out] frame_size_ptr the output buffer size in bytes + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields. + * All decoders are designed to use the least fields possible though. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame data was decompressed (used) from the input AVPacket. + */ +attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, + int *frame_size_ptr, + AVPacket *avpkt); +#endif + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. 
The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. + * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + */ +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + */ +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. 
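+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: feeding
+ * one packet to avcodec_decode_video2() as documented above. "ctx" is assumed
+ * to be an opened decoder with refcounted_frames set to 1 and "pkt" a packet
+ * from a demuxer; flushing with an empty packet at end of stream is omitted.)
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * int got_picture = 0;
+ * int used = avcodec_decode_video2(ctx, frame, &got_picture, &pkt);
+ *
+ * if (used >= 0 && got_picture) {
+ *     // frame->data[] / frame->linesize[] now describe the picture
+ *     av_frame_unref(frame);           // we own this reference
+ * }
+ * av_frame_free(&frame);
+ * @endcode
+ *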
+ * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @param avctx the codec context + * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be + freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. */ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. 
+ */ + int key_frame; + + /** + * Time difference in stream time base units from the pts of this + * packet to the point at which the output from the decoder has converged + * independent from the availability of previous frames. That is, the + * frames are virtually identical no matter if decoding started from + * the very first frame or from this keyframe. + * Is AV_NOPTS_VALUE if unknown. + * This field is not the display duration of the current frame. + * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY + * set. + * + * The purpose of this field is to allow seeking in streams that have no + * keyframes in the conventional sense. It corresponds to the + * recovery point SEI in H.264 and match_time_delta in NUT. It is also + * essential for some types of subtitle streams to ensure that all + * subtitles are correctly displayed after seeking. + */ + int64_t convergence_duration; + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. + */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. 
+ */ + int output_picture_number; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. + * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +#if FF_API_OLD_ENCODE_AUDIO +/** + * Encode an audio frame from samples into buf. + * + * @deprecated Use avcodec_encode_audio2 instead. + * + * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. + * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user + * will know how much space is needed because it depends on the value passed + * in buf_size as described below. In that case a lower value can be used. + * + * @param avctx the codec context + * @param[out] buf the output buffer + * @param[in] buf_size the output buffer size + * @param[in] samples the input buffer containing the samples + * The number of samples read from this buffer is frame_size*channels, + * both of which are defined in avctx. + * For codecs which have avctx->frame_size equal to 0 (e.g. 
PCM) the number of + * samples read from samples is equal to: + * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) + * This also implies that av_get_bits_per_sample() must not return 0 for these + * codecs. + * @return On error a negative value is returned, on success zero or the number + * of bytes used to encode the data read from the input buffer. + */ +int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, + uint8_t *buf, int buf_size, + const short *samples); +#endif + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +#if FF_API_OLD_ENCODE_VIDEO +/** + * @deprecated use avcodec_encode_video2() instead. + * + * Encode a video frame from pict into buf. + * The input picture should be + * stored using a specific format, namely avctx.pix_fmt. + * + * @param avctx the codec context + * @param[out] buf the output buffer for the bitstream of encoded frame + * @param[in] buf_size the size of the output buffer in bytes + * @param[in] pict the input picture to encode + * @return On error a negative value is returned, on success zero or the number + * of bytes used from the output buffer. + */ +attribute_deprecated +int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVFrame *pict); +#endif + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. 
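+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: one call
+ * to avcodec_encode_audio2() as documented above. "enc_ctx" is assumed to be an
+ * opened audio encoder and "frame" a raw-audio AVFrame whose nb_samples matches
+ * enc_ctx->frame_size.)
+ * @code
+ * AVPacket pkt;
+ * int got_packet = 0;
+ *
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;                     // let the encoder allocate the payload
+ * pkt.size = 0;
+ *
+ * if (avcodec_encode_audio2(enc_ctx, &pkt, frame, &got_packet) == 0 && got_packet) {
+ *     // write pkt.data / pkt.size somewhere, then release the packet
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode
+ *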
+ * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_free_packet() (i.e. avpkt->destruct will be + * called to free the user supplied buffer). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + */ +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVCODEC_RESAMPLE +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * @deprecated use libswresample instead + * + * @{ + */ +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. + * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +attribute_deprecated +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +attribute_deprecated +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +attribute_deprecated +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. 
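+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation:
+ * avcodec_encode_video2() as documented above, including the end-of-stream
+ * flush. "enc_ctx" is assumed to be an opened video encoder and "frame" a raw
+ * picture in enc_ctx->pix_fmt, or NULL once the input is exhausted.)
+ * @code
+ * AVPacket pkt;
+ * int got_packet = 0;
+ *
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;                     // encoder allocates the payload
+ * pkt.size = 0;
+ *
+ * if (avcodec_encode_video2(enc_ctx, &pkt, frame, &got_packet) == 0 && got_packet) {
+ *     // write pkt.data / pkt.size, then:
+ *     av_free_packet(&pkt);
+ * }
+ * // after the last input frame, keep calling with frame == NULL until
+ * // got_packet stays 0 to drain a CODEC_CAP_DELAY encoder
+ * @endcode
+ *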
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +attribute_deprecated +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +attribute_deprecated +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +attribute_deprecated +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +attribute_deprecated +void av_resample_close(struct AVResampleContext *c); + +/** + * @} + */ +#endif + +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * Allocate memory for the pixels of a picture and setup the AVPicture + * fields for it. + * + * Call avpicture_free() to free it. + * + * @param picture the picture structure to be filled in + * @param pix_fmt the pixel format of the picture + * @param width the width of the picture + * @param height the height of the picture + * @return zero if successful, a negative error code otherwise + * + * @see av_image_alloc(), avpicture_fill() + */ +int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Free a picture previously allocated by avpicture_alloc(). + * The data buffer used by the AVPicture is freed, but the AVPicture structure + * itself is not. + * + * @param picture the AVPicture to be freed + */ +void avpicture_free(AVPicture *picture); + +/** + * Setup the picture fields based on the specified image parameters + * and the provided image data buffer. + * + * The picture fields are filled in by using the image data buffer + * pointed to by ptr. + * + * If ptr is NULL, the function will fill only the picture linesize + * array and return the required size for the image buffer. + * + * To allocate an image buffer and fill the picture data in one call, + * use avpicture_alloc(). 
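+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: wrapping
+ * a caller-owned buffer with avpicture_fill(). "width" and "height" are assumed
+ * to be valid frame dimensions.)
+ * @code
+ * AVPicture pic;
+ * int size = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height);
+ * uint8_t *buf = size > 0 ? av_malloc(size) : NULL;
+ *
+ * if (buf && avpicture_fill(&pic, buf, AV_PIX_FMT_YUV420P, width, height) >= 0) {
+ *     // pic.data[] / pic.linesize[] now point at the planes inside buf
+ * }
+ * av_free(buf);
+ * @endcode
+ *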
+ * + * @param picture the picture to be filled in + * @param ptr buffer where the image data is stored, or NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return the size in bytes required for src, a negative error code + * in case of failure + * + * @see av_image_fill_arrays() + */ +int avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Copy pixel data from an AVPicture into a buffer. + * + * avpicture_get_size() can be used to compute the required size for + * the buffer to fill. + * + * @param src source picture with filled data + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @param dest destination buffer + * @param dest_size destination buffer size in bytes + * @return the number of bytes written to dest, or a negative value + * (error code) on error, for example if the destination buffer is not + * big enough + * + * @see av_image_copy_to_buffer() + */ +int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * Calculate the size in bytes that a picture of the given width and height + * would occupy if stored in the given picture format. + * + * @param pix_fmt picture pixel format + * @param width picture width + * @param height picture height + * @return the computed picture buffer size or a negative error code + * in case of error + * + * @see av_image_get_buffer_size(). + */ +int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +#if FF_API_DEINTERLACE +/** + * deinterlace - if not supported return -1 + * + * @deprecated - use yadif (in libavfilter) instead + */ +attribute_deprecated +int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); +#endif +/** + * Copy image src to dst. Wraps av_image_copy(). + */ +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Crop image top and left side. + */ +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * Pad image. + */ +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample + * for one that returns a failure code and continues in case of invalid + * pix_fmts. 
+ * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @see av_pix_fmt_get_chroma_sub_sample + */ + +void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); + +/** + * Return a value representing the fourCC code associated with the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. + */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * avcodec_get_pix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. avcodec_find_best_pix_fmt_of_list() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format and a selection of two destination pixel formats. When converting from + * one pixel format to another, information loss may occur. For example, when converting + * from RGB24 to GRAY, the color information will be lost.
Similarly, other losses occur when + * converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() selects which of + * the given pixel formats should be used to suffer the least amount of loss. + * + * If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be + * returned. + * + * @code + * src_pix_fmt = AV_PIX_FMT_YUV420P; + * dst_pix_fmt1= AV_PIX_FMT_RGB24; + * dst_pix_fmt2= AV_PIX_FMT_GRAY8; + * dst_pix_fmt3= AV_PIX_FMT_RGB8; + * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored. + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss); + * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss); + * @endcode + * + * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from + * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from + * @param[in] src_pix_fmt Source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e. + * NULL or value of zero means we care about all losses. Out: the loss + * that occurs when converting from src to selected dst pixel format. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); +#else +enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); +#endif + + +enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +void avcodec_set_dimensions(AVCodecContext *s, int width, int height); + +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf_size size in bytes of buf + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + */ +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. The filled AVFrame data + * pointers will point to this buffer. 
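+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: wrapping
+ * a caller-owned interleaved S16 buffer with avcodec_fill_audio_frame(). The
+ * buffer stays owned by the caller and must outlive the frame.)
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * int nb_channels = 2, nb_samples = 1024;
+ * int size = av_samples_get_buffer_size(NULL, nb_channels, nb_samples,
+ *                                       AV_SAMPLE_FMT_S16, 0);
+ * uint8_t *buf = size > 0 ? av_malloc(size) : NULL;
+ *
+ * frame->nb_samples = nb_samples;      // must be set before the call
+ * if (buf && avcodec_fill_audio_frame(frame, nb_channels, AV_SAMPLE_FMT_S16,
+ *                                     buf, size, 0) >= 0) {
+ *     // frame->data[0] now points into buf
+ * }
+ * @endcode
+ *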
+ * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. + * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + + +typedef struct AVBitStreamFilterContext { + void *priv_data; + struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; +} AVBitStreamFilterContext; + + +typedef struct AVBitStreamFilter { + const char *name; + int priv_data_size; + int (*filter)(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + void (*close)(AVBitStreamFilterContext *bsfc); + struct AVBitStreamFilter *next; +} AVBitStreamFilter; + +/** + * Register a bitstream filter. + * + * The filter will be accessible to the application code through + * av_bitstream_filter_next() or can be directly initialized with + * av_bitstream_filter_init(). + * + * @see avcodec_register_all() + */ +void av_register_bitstream_filter(AVBitStreamFilter *bsf); + +/** + * Create and initialize a bitstream filter context given a bitstream + * filter name. + * + * The returned context must be freed with av_bitstream_filter_close(). 
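+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: applying
+ * the "h264_mp4toannexb" filter, assuming it was compiled in, that "avctx" is
+ * the stream's codec context and "pkt" an input packet.)
+ * @code
+ * AVBitStreamFilterContext *bsf = av_bitstream_filter_init("h264_mp4toannexb");
+ * uint8_t *out = NULL;
+ * int out_size = 0;
+ *
+ * if (bsf) {
+ *     int ret = av_bitstream_filter_filter(bsf, avctx, NULL, &out, &out_size,
+ *                                          pkt.data, pkt.size,
+ *                                          pkt.flags & AV_PKT_FLAG_KEY);
+ *     if (ret > 0)
+ *         av_free(out);                // a new buffer was allocated for us
+ *     av_bitstream_filter_close(bsf);
+ * }
+ * @endcode
+ *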
+ * + * @param name the name of the bitstream filter + * @return a bitstream filter context if a matching filter was found + * and successfully initialized, NULL otherwise + */ +AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); + +/** + * Filter bitstream. + * + * This function filters the buffer buf with size buf_size, and places the + * filtered buffer in the buffer pointed to by poutbuf. + * + * The output buffer must be freed by the caller. + * + * @param bsfc bitstream filter context created by av_bitstream_filter_init() + * @param avctx AVCodecContext accessed by the filter, may be NULL. + * If specified, this must point to the encoder context of the + * output stream the packet is sent to. + * @param args arguments which specify the filter configuration, may be NULL + * @param poutbuf pointer which is updated to point to the filtered buffer + * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes + * @param buf buffer containing the data to filter + * @param buf_size size in bytes of buf + * @param keyframe set to non-zero if the buffer to filter corresponds to key-frame packet data + * @return >= 0 in case of success, or a negative error code in case of failure + * + * If the return value is positive, an output buffer is allocated and + * is available in *poutbuf, and is distinct from the input buffer. + * + * If the return value is 0, the output buffer is not allocated and + * should be considered identical to the input buffer, or in case + * *poutbuf was set it points to the input buffer (not necessarily to + * its starting address). + */ +int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + +/** + * Release bitstream filter context. + * + * @param bsf the bitstream filter context created with + * av_bitstream_filter_init(), can be NULL + */ +void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); + +/** + * If f is NULL, return the first registered bitstream filter, + * if f is non-NULL, return the next registered bitstream filter + * after f, or NULL if f is the last one. + * + * This function can be used to iterate over all registered bitstream + * filters. + */ +AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); + +/* memory */ + +/** + * Reallocate the given block if it is not large enough, otherwise do nothing. + * + * @see av_realloc + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc, the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour as av_fast_malloc, but the buffer has + * FF_INPUT_BUFFER_PADDING_SIZE additional bytes at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear.
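+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: growing
+ * a scratch buffer with av_fast_malloc(). "needed_size" is whatever the current
+ * operation requires; the buffer is only reallocated when it is too small.)
+ * @code
+ * static uint8_t *scratch      = NULL;
+ * static unsigned scratch_size = 0;
+ *
+ * av_fast_malloc(&scratch, &scratch_size, needed_size);
+ * if (!scratch)
+ *     return AVERROR(ENOMEM);          // the old buffer has already been freed
+ * // scratch now holds at least needed_size bytes (contents are undefined)
+ * @endcode
+ *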
+ */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour as av_fast_padded_malloc, except that the buffer will always + * be 0-initialized after the call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_MISSING_SAMPLE +/** + * Log a generic warning message about a missing feature. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] feature string containing the name of the missing feature + * @param[in] want_sample indicates if samples are wanted which exhibit this feature. + * If want_sample is non-zero, additional verbiage will be added to the log + * message which tells the user how to report samples to the development + * mailing list. + * @deprecated Use avpriv_report_missing_feature() instead. + */ +attribute_deprecated +void av_log_missing_feature(void *avc, const char *feature, int want_sample); + +/** + * Log a generic warning message asking for a sample. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] msg string containing an optional message, or NULL if no message + * @deprecated Use avpriv_request_sample() instead. + */ +attribute_deprecated +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); +#endif /* FF_API_MISSING_SAMPLE */ + +/** + * Register the hardware accelerator hwaccel. + */ +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. + */ +AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel); + + +/** + * Lock operation used by lockmgr + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. mutex points to a (void *) where the + * lockmgr should store/get a pointer to a user allocated mutex. It's + * NULL upon AV_LOCK_CREATE and != NULL for all other ops. + * + * @param cb User defined callback. Note: FFmpeg may invoke calls to this + * callback during the call to av_lockmgr_register(). + * Thus, the application must be prepared to handle that. + * If cb is set to NULL the lockmgr will be unregistered. + * Also note that during unregistration the previously registered + * lockmgr callback may also be invoked. + */ +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); + +/** + * Get the type of the given codec. + */ +enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec.
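+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: a
+ * pthread-based callback for the av_lockmgr_register() API documented above;
+ * <pthread.h> and <stdlib.h> are assumed to be available.)
+ * @code
+ * static int lock_cb(void **mutex, enum AVLockOp op)
+ * {
+ *     switch (op) {
+ *     case AV_LOCK_CREATE:
+ *         *mutex = malloc(sizeof(pthread_mutex_t));
+ *         return *mutex && pthread_mutex_init(*mutex, NULL) == 0 ? 0 : -1;
+ *     case AV_LOCK_OBTAIN:
+ *         return pthread_mutex_lock(*mutex) == 0 ? 0 : -1;
+ *     case AV_LOCK_RELEASE:
+ *         return pthread_mutex_unlock(*mutex) == 0 ? 0 : -1;
+ *     case AV_LOCK_DESTROY:
+ *         pthread_mutex_destroy(*mutex);
+ *         free(*mutex);
+ *         return 0;
+ *     }
+ *     return -1;
+ * }
+ *
+ * av_lockmgr_register(lock_cb);        // once, before any codec is opened
+ * @endcode
+ *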
+ * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. + */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avfft.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avfft.h new file mode 100644 index 0000000..2d20a45 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/avfft.h @@ -0,0 +1,116 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling ff_fft_calc(). + */ +void av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
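+ *
+ * (Illustrative sketch, not part of the upstream FFmpeg documentation: a
+ * 1024-point forward transform using the calls above; the input values are
+ * assumed to have been filled in by the caller.)
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);   // 1 << 10 = 1024 points, forward
+ * FFTComplex samples[1 << 10];
+ *
+ * // ... fill samples[i].re / samples[i].im ...
+ * av_fft_permute(fft, samples);           // reorder before the transform
+ * av_fft_calc(fft, samples);              // in-place, unnormalized
+ * av_fft_end(fft);
+ * @endcode
+ *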
+ */ +void av_fft_calc(FFTContext *s, FFTComplex *z); + +void av_fft_end(FFTContext *s); + +FFTContext *av_mdct_init(int nbits, int inverse, double scale); +void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. + * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); +void av_rdft_calc(RDFTContext *s, FFTSample *data); +void av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *av_dct_init(int nbits, enum DCTTransformType type); +void av_dct_calc(DCTContext *s, FFTSample *data); +void av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/dxva2.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/dxva2.h new file mode 100644 index 0000000..ac39e06 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/dxva2.h @@ -0,0 +1,95 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA_H +#define AVCODEC_DXVA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. + */ + +#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0600 +#undef _WIN32_WINNT +#endif + +#if !defined(_WIN32_WINNT) +#define _WIN32_WINNT 0x0600 +#endif + +#include +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. 
+ */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/old_codec_ids.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/old_codec_ids.h new file mode 100644 index 0000000..d8a8f74 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/old_codec_ids.h @@ -0,0 +1,397 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_OLD_CODEC_IDS_H +#define AVCODEC_OLD_CODEC_IDS_H + +#include "libavutil/common.h" + +/* + * This header exists to prevent new codec IDs from being accidentally added to + * the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVCodecID enum instead. 
+ */ + + CODEC_ID_NONE = AV_CODEC_ID_NONE, + + /* video codecs */ + CODEC_ID_MPEG1VIDEO, + CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding + CODEC_ID_MPEG2VIDEO_XVMC, + CODEC_ID_H261, + CODEC_ID_H263, + CODEC_ID_RV10, + CODEC_ID_RV20, + CODEC_ID_MJPEG, + CODEC_ID_MJPEGB, + CODEC_ID_LJPEG, + CODEC_ID_SP5X, + CODEC_ID_JPEGLS, + CODEC_ID_MPEG4, + CODEC_ID_RAWVIDEO, + CODEC_ID_MSMPEG4V1, + CODEC_ID_MSMPEG4V2, + CODEC_ID_MSMPEG4V3, + CODEC_ID_WMV1, + CODEC_ID_WMV2, + CODEC_ID_H263P, + CODEC_ID_H263I, + CODEC_ID_FLV1, + CODEC_ID_SVQ1, + CODEC_ID_SVQ3, + CODEC_ID_DVVIDEO, + CODEC_ID_HUFFYUV, + CODEC_ID_CYUV, + CODEC_ID_H264, + CODEC_ID_INDEO3, + CODEC_ID_VP3, + CODEC_ID_THEORA, + CODEC_ID_ASV1, + CODEC_ID_ASV2, + CODEC_ID_FFV1, + CODEC_ID_4XM, + CODEC_ID_VCR1, + CODEC_ID_CLJR, + CODEC_ID_MDEC, + CODEC_ID_ROQ, + CODEC_ID_INTERPLAY_VIDEO, + CODEC_ID_XAN_WC3, + CODEC_ID_XAN_WC4, + CODEC_ID_RPZA, + CODEC_ID_CINEPAK, + CODEC_ID_WS_VQA, + CODEC_ID_MSRLE, + CODEC_ID_MSVIDEO1, + CODEC_ID_IDCIN, + CODEC_ID_8BPS, + CODEC_ID_SMC, + CODEC_ID_FLIC, + CODEC_ID_TRUEMOTION1, + CODEC_ID_VMDVIDEO, + CODEC_ID_MSZH, + CODEC_ID_ZLIB, + CODEC_ID_QTRLE, + CODEC_ID_TSCC, + CODEC_ID_ULTI, + CODEC_ID_QDRAW, + CODEC_ID_VIXL, + CODEC_ID_QPEG, + CODEC_ID_PNG, + CODEC_ID_PPM, + CODEC_ID_PBM, + CODEC_ID_PGM, + CODEC_ID_PGMYUV, + CODEC_ID_PAM, + CODEC_ID_FFVHUFF, + CODEC_ID_RV30, + CODEC_ID_RV40, + CODEC_ID_VC1, + CODEC_ID_WMV3, + CODEC_ID_LOCO, + CODEC_ID_WNV1, + CODEC_ID_AASC, + CODEC_ID_INDEO2, + CODEC_ID_FRAPS, + CODEC_ID_TRUEMOTION2, + CODEC_ID_BMP, + CODEC_ID_CSCD, + CODEC_ID_MMVIDEO, + CODEC_ID_ZMBV, + CODEC_ID_AVS, + CODEC_ID_SMACKVIDEO, + CODEC_ID_NUV, + CODEC_ID_KMVC, + CODEC_ID_FLASHSV, + CODEC_ID_CAVS, + CODEC_ID_JPEG2000, + CODEC_ID_VMNC, + CODEC_ID_VP5, + CODEC_ID_VP6, + CODEC_ID_VP6F, + CODEC_ID_TARGA, + CODEC_ID_DSICINVIDEO, + CODEC_ID_TIERTEXSEQVIDEO, + CODEC_ID_TIFF, + CODEC_ID_GIF, + CODEC_ID_DXA, + CODEC_ID_DNXHD, + CODEC_ID_THP, + CODEC_ID_SGI, + CODEC_ID_C93, + CODEC_ID_BETHSOFTVID, + CODEC_ID_PTX, + CODEC_ID_TXD, + CODEC_ID_VP6A, + CODEC_ID_AMV, + CODEC_ID_VB, + CODEC_ID_PCX, + CODEC_ID_SUNRAST, + CODEC_ID_INDEO4, + CODEC_ID_INDEO5, + CODEC_ID_MIMIC, + CODEC_ID_RL2, + CODEC_ID_ESCAPE124, + CODEC_ID_DIRAC, + CODEC_ID_BFI, + CODEC_ID_CMV, + CODEC_ID_MOTIONPIXELS, + CODEC_ID_TGV, + CODEC_ID_TGQ, + CODEC_ID_TQI, + CODEC_ID_AURA, + CODEC_ID_AURA2, + CODEC_ID_V210X, + CODEC_ID_TMV, + CODEC_ID_V210, + CODEC_ID_DPX, + CODEC_ID_MAD, + CODEC_ID_FRWU, + CODEC_ID_FLASHSV2, + CODEC_ID_CDGRAPHICS, + CODEC_ID_R210, + CODEC_ID_ANM, + CODEC_ID_BINKVIDEO, + CODEC_ID_IFF_ILBM, + CODEC_ID_IFF_BYTERUN1, + CODEC_ID_KGV1, + CODEC_ID_YOP, + CODEC_ID_VP8, + CODEC_ID_PICTOR, + CODEC_ID_ANSI, + CODEC_ID_A64_MULTI, + CODEC_ID_A64_MULTI5, + CODEC_ID_R10K, + CODEC_ID_MXPEG, + CODEC_ID_LAGARITH, + CODEC_ID_PRORES, + CODEC_ID_JV, + CODEC_ID_DFA, + CODEC_ID_WMV3IMAGE, + CODEC_ID_VC1IMAGE, + CODEC_ID_UTVIDEO, + CODEC_ID_BMV_VIDEO, + CODEC_ID_VBLE, + CODEC_ID_DXTORY, + CODEC_ID_V410, + CODEC_ID_XWD, + CODEC_ID_CDXL, + CODEC_ID_XBM, + CODEC_ID_ZEROCODEC, + CODEC_ID_MSS1, + CODEC_ID_MSA1, + CODEC_ID_TSCC2, + CODEC_ID_MTS2, + CODEC_ID_CLLC, + CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), + CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), + CODEC_ID_EXR = MKBETAG('0','E','X','R'), + CODEC_ID_AVRP = MKBETAG('A','V','R','P'), + + CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), + CODEC_ID_AVUI = MKBETAG('A','V','U','I'), + CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), + CODEC_ID_V308 = MKBETAG('V','3','0','8'), + CODEC_ID_V408 = 
MKBETAG('V','4','0','8'), + CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), + CODEC_ID_SANM = MKBETAG('S','A','N','M'), + CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), + CODEC_ID_SNOW = AV_CODEC_ID_SNOW, + + /* various PCM "codecs" */ + CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + CODEC_ID_PCM_S16LE = 0x10000, + CODEC_ID_PCM_S16BE, + CODEC_ID_PCM_U16LE, + CODEC_ID_PCM_U16BE, + CODEC_ID_PCM_S8, + CODEC_ID_PCM_U8, + CODEC_ID_PCM_MULAW, + CODEC_ID_PCM_ALAW, + CODEC_ID_PCM_S32LE, + CODEC_ID_PCM_S32BE, + CODEC_ID_PCM_U32LE, + CODEC_ID_PCM_U32BE, + CODEC_ID_PCM_S24LE, + CODEC_ID_PCM_S24BE, + CODEC_ID_PCM_U24LE, + CODEC_ID_PCM_U24BE, + CODEC_ID_PCM_S24DAUD, + CODEC_ID_PCM_ZORK, + CODEC_ID_PCM_S16LE_PLANAR, + CODEC_ID_PCM_DVD, + CODEC_ID_PCM_F32BE, + CODEC_ID_PCM_F32LE, + CODEC_ID_PCM_F64BE, + CODEC_ID_PCM_F64LE, + CODEC_ID_PCM_BLURAY, + CODEC_ID_PCM_LXF, + CODEC_ID_S302M, + CODEC_ID_PCM_S8_PLANAR, + + /* various ADPCM codecs */ + CODEC_ID_ADPCM_IMA_QT = 0x11000, + CODEC_ID_ADPCM_IMA_WAV, + CODEC_ID_ADPCM_IMA_DK3, + CODEC_ID_ADPCM_IMA_DK4, + CODEC_ID_ADPCM_IMA_WS, + CODEC_ID_ADPCM_IMA_SMJPEG, + CODEC_ID_ADPCM_MS, + CODEC_ID_ADPCM_4XM, + CODEC_ID_ADPCM_XA, + CODEC_ID_ADPCM_ADX, + CODEC_ID_ADPCM_EA, + CODEC_ID_ADPCM_G726, + CODEC_ID_ADPCM_CT, + CODEC_ID_ADPCM_SWF, + CODEC_ID_ADPCM_YAMAHA, + CODEC_ID_ADPCM_SBPRO_4, + CODEC_ID_ADPCM_SBPRO_3, + CODEC_ID_ADPCM_SBPRO_2, + CODEC_ID_ADPCM_THP, + CODEC_ID_ADPCM_IMA_AMV, + CODEC_ID_ADPCM_EA_R1, + CODEC_ID_ADPCM_EA_R3, + CODEC_ID_ADPCM_EA_R2, + CODEC_ID_ADPCM_IMA_EA_SEAD, + CODEC_ID_ADPCM_IMA_EA_EACS, + CODEC_ID_ADPCM_EA_XAS, + CODEC_ID_ADPCM_EA_MAXIS_XA, + CODEC_ID_ADPCM_IMA_ISS, + CODEC_ID_ADPCM_G722, + CODEC_ID_ADPCM_IMA_APC, + CODEC_ID_VIMA = MKBETAG('V','I','M','A'), + + /* AMR */ + CODEC_ID_AMR_NB = 0x12000, + CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + CODEC_ID_RA_144 = 0x13000, + CODEC_ID_RA_288, + + /* various DPCM codecs */ + CODEC_ID_ROQ_DPCM = 0x14000, + CODEC_ID_INTERPLAY_DPCM, + CODEC_ID_XAN_DPCM, + CODEC_ID_SOL_DPCM, + + /* audio codecs */ + CODEC_ID_MP2 = 0x15000, + CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + CODEC_ID_AAC, + CODEC_ID_AC3, + CODEC_ID_DTS, + CODEC_ID_VORBIS, + CODEC_ID_DVAUDIO, + CODEC_ID_WMAV1, + CODEC_ID_WMAV2, + CODEC_ID_MACE3, + CODEC_ID_MACE6, + CODEC_ID_VMDAUDIO, + CODEC_ID_FLAC, + CODEC_ID_MP3ADU, + CODEC_ID_MP3ON4, + CODEC_ID_SHORTEN, + CODEC_ID_ALAC, + CODEC_ID_WESTWOOD_SND1, + CODEC_ID_GSM, ///< as in Berlin toast format + CODEC_ID_QDM2, + CODEC_ID_COOK, + CODEC_ID_TRUESPEECH, + CODEC_ID_TTA, + CODEC_ID_SMACKAUDIO, + CODEC_ID_QCELP, + CODEC_ID_WAVPACK, + CODEC_ID_DSICINAUDIO, + CODEC_ID_IMC, + CODEC_ID_MUSEPACK7, + CODEC_ID_MLP, + CODEC_ID_GSM_MS, /* as found in WAV */ + CODEC_ID_ATRAC3, + CODEC_ID_VOXWARE, + CODEC_ID_APE, + CODEC_ID_NELLYMOSER, + CODEC_ID_MUSEPACK8, + CODEC_ID_SPEEX, + CODEC_ID_WMAVOICE, + CODEC_ID_WMAPRO, + CODEC_ID_WMALOSSLESS, + CODEC_ID_ATRAC3P, + CODEC_ID_EAC3, + CODEC_ID_SIPR, + CODEC_ID_MP1, + CODEC_ID_TWINVQ, + CODEC_ID_TRUEHD, + CODEC_ID_MP4ALS, + CODEC_ID_ATRAC1, + CODEC_ID_BINKAUDIO_RDFT, + CODEC_ID_BINKAUDIO_DCT, + CODEC_ID_AAC_LATM, + CODEC_ID_QDMC, + CODEC_ID_CELT, + CODEC_ID_G723_1, + CODEC_ID_G729, + CODEC_ID_8SVX_EXP, + CODEC_ID_8SVX_FIB, + CODEC_ID_BMV_AUDIO, + CODEC_ID_RALF, + CODEC_ID_IAC, + CODEC_ID_ILBC, + CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), + CODEC_ID_SONIC = MKBETAG('S','O','N','C'), + CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), + CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), + CODEC_ID_OPUS = 
MKBETAG('O','P','U','S'), + + /* subtitle codecs */ + CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + CODEC_ID_DVD_SUBTITLE = 0x17000, + CODEC_ID_DVB_SUBTITLE, + CODEC_ID_TEXT, ///< raw UTF-8 text + CODEC_ID_XSUB, + CODEC_ID_SSA, + CODEC_ID_MOV_TEXT, + CODEC_ID_HDMV_PGS_SUBTITLE, + CODEC_ID_DVB_TELETEXT, + CODEC_ID_SRT, + CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), + CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), + CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), + CODEC_ID_SAMI = MKBETAG('S','A','M','I'), + CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), + CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), + + /* other specific kind of codecs (generally used for attachments) */ + CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. + CODEC_ID_TTF = 0x18000, + CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), + CODEC_ID_XBIN = MKBETAG('X','B','I','N'), + CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), + CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), + + CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it + + CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + +#endif /* AVCODEC_OLD_CODEC_IDS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vaapi.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vaapi.h new file mode 100644 index 0000000..815a27e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vaapi.h @@ -0,0 +1,173 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. + * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. 
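The CODEC_ID_* values in old_codec_ids.h above exist only for source compatibility; new code is expected to use the AV_CODEC_ID_* enum together with the regular lookup helpers, for example:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    int main(void)
    {
        avcodec_register_all();   /* still required in this libavcodec version */

        /* Look the decoder up by its AVCodecID, not a deprecated CODEC_ID_* alias. */
        AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);
        printf("%s decoder: %s\n", avcodec_get_name(AV_CODEC_ID_H264),
               dec ? dec->name : "not compiled in");
        return 0;
    }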
+ */ +struct vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; + + /** + * VAPictureParameterBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t pic_param_buf_id; + + /** + * VAIQMatrixBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t iq_matrix_buf_id; + + /** + * VABitPlaneBuffer ID (for VC-1 decoding) + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t bitplane_buf_id; + + /** + * Slice parameter/data buffer IDs + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t *slice_buf_ids; + + /** + * Number of effective slice buffer IDs to send to the HW + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int n_slice_buf_ids; + + /** + * Size of pre-allocated slice_buf_ids + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_buf_ids_alloc; + + /** + * Pointer to VASliceParameterBuffers + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + void *slice_params; + + /** + * Size of a VASliceParameterBuffer element + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_param_size; + + /** + * Size of pre-allocated slice_params + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_params_alloc; + + /** + * Number of slices currently filled in + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + unsigned int slice_count; + + /** + * Pointer to slice data buffer base + * - encoding: unused + * - decoding: Set by libavcodec + */ + const uint8_t *slice_data; + + /** + * Current size of slice data + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + uint32_t slice_data_size; +}; + +/* @} */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vda.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vda.h new file mode 100644 index 0000000..b3d6399 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vda.h @@ -0,0 +1,162 @@ +/* + * VDA HW acceleration + * + * copyright (c) 2011 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDA_H +#define AVCODEC_VDA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. 
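As a sketch of how struct vaapi_context above is meant to be wired up: the display, config and context handles are assumed to come from the application's own libva setup (the parameter names and the helper below are placeholders, not part of this header), and every field documented as "Set by libavcodec" is left untouched.

    #include <libavcodec/avcodec.h>
    #include <libavcodec/vaapi.h>

    static struct vaapi_context va_ctx;  /* zero-initialized, as required */

    /* Hypothetical helper: attach already-created VA-API handles to a codec
     * context before avcodec_open2(). */
    static void attach_vaapi(AVCodecContext *avctx, void *va_display,
                             uint32_t va_config_id, uint32_t va_context_id)
    {
        va_ctx.display    = va_display;    /* "Set by user" fields only */
        va_ctx.config_id  = va_config_id;
        va_ctx.context_id = va_context_id;
        avctx->hwaccel_context = &va_ctx;  /* libavcodec fills the rest */
    }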
+ */ + +#include + +// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes +// http://openradar.appspot.com/8026390 +#undef __GNUC_STDC_INLINE__ + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "libavcodec/version.h" + +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +/** + * This structure is used to provide the necessary configurations and data + * to the VDA FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct vda_context { + /** + * VDA decoder object. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + VDADecoder decoder; + + /** + * The Core Video pixel buffer that contains the current image data. + * + * encoding: unused + * decoding: Set by libavcodec. Unset by user. + */ + CVPixelBufferRef cv_buffer; + + /** + * Use the hardware decoder in synchronous mode. + * + * encoding: unused + * decoding: Set by user. + */ + int use_sync_decoding; + + /** + * The frame width. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int width; + + /** + * The frame height. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int height; + + /** + * The frame format. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int format; + + /** + * The pixel format for output image buffers. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + OSType cv_pix_fmt_type; + + /** + * The current bitstream buffer. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + uint8_t *priv_bitstream; + + /** + * The current size of the bitstream. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_bitstream_size; + + /** + * The reference size used for fast reallocation. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + int priv_allocated_size; + + /** + * Use av_buffer to manage buffer. + * When the flag is set, the CVPixelBuffers returned by the decoder will + * be released automatically, so you have to retain them if necessary. + * Not setting this flag may cause memory leak. + * + * encoding: unused + * decoding: Set by user. + */ + int use_ref_buffer; +}; + +/** Create the video decoder. */ +int ff_vda_create_decoder(struct vda_context *vda_ctx, + uint8_t *extradata, + int extradata_size); + +/** Destroy the video decoder. */ +int ff_vda_destroy_decoder(struct vda_context *vda_ctx); + +/** + * @} + */ + +#endif /* AVCODEC_VDA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vdpau.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vdpau.h new file mode 100644 index 0000000..b1c836c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/vdpau.h @@ -0,0 +1,195 @@ +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include +#include +#include "libavutil/avconfig.h" +#include "libavutil/attributes.h" + +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU 1 +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU 1 +#endif + +#if FF_API_BUFS_VDPAU +union AVVDPAUPictureInfo { + VdpPictureInfoH264 h264; + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoVC1 vc1; + VdpPictureInfoMPEG4Part2 mpeg4; +}; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + +#if FF_API_BUFS_VDPAU + /** + * VDPAU picture information + * + * Set by libavcodec. + */ + attribute_deprecated + union AVVDPAUPictureInfo info; + + /** + * Allocated size of the bitstream_buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_allocated; + + /** + * Useful bitstream buffers in the bitstream buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_used; + + /** + * Table of bitstream buffers. + * The user is responsible for freeing this buffer using av_freep(). + * + * Set by libavcodec. + */ + attribute_deprecated + VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +#if FF_API_CAP_VDPAU +/** @brief The videoSurface is used for rendering. */ +#define FF_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * @brief The videoSurface is needed for reference/prediction. + * The codec manipulates this. 
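A sketch of the intended AVVDPAUContext usage, assuming the VDPAU decoder handle and render callback were obtained from the application's own VDPAU setup (VdpDecoderCreate / VdpGetProcAddress); those two parameters are placeholders here, and the system vdpau headers must be available:

    #include <libavcodec/avcodec.h>
    #include <libavcodec/vdpau.h>

    static int attach_vdpau(AVCodecContext *avctx,
                            VdpDecoder decoder, VdpDecoderRender *render)
    {
        AVVDPAUContext *ctx = av_alloc_vdpaucontext(); /* allocator from this header */
        if (!ctx)
            return AVERROR(ENOMEM);
        ctx->decoder = decoder;   /* set by user */
        ctx->render  = render;    /* set by user */
        avctx->hwaccel_context = ctx;
        return 0;
    }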
+ */ +#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 + +/** + * @brief This structure is used as a callback between the FFmpeg + * decoder (vd_) and presentation (vo_) module. + * This is used for defining a video frame containing surface, + * picture parameter, bitstream information etc which are passed + * between the FFmpeg decoder and its clients. + */ +struct vdpau_render_state { + VdpVideoSurface surface; ///< Used as rendered surface, never changed. + + int state; ///< Holds FF_VDPAU_STATE_* values. + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif + + /** Describe size/location of the compressed video data. + Set to 0 when freeing bitstream_buffers. */ + int bitstream_buffers_allocated; + int bitstream_buffers_used; + /** The user is responsible for freeing this buffer using av_freep(). */ + VdpBitstreamBuffer *bitstream_buffers; + +#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; +#endif +}; +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/version.h new file mode 100644 index 0000000..63f6346 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/version.h @@ -0,0 +1,104 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "libavutil/avutil.h" + +#define LIBAVCODEC_VERSION_MAJOR 55 +#define LIBAVCODEC_VERSION_MINOR 39 +#define LIBAVCODEC_VERSION_MICRO 101 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
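The version macros above pack major/minor/micro into one integer via AV_VERSION_INT (major<<16 | minor<<8 | micro), so the compile-time constant can be compared against the runtime avcodec_version() value:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    int main(void)
    {
        unsigned run = avcodec_version();   /* version of the linked library */
        printf("built against %d.%d.%d, running %u.%u.%u\n",
               LIBAVCODEC_VERSION_MAJOR, LIBAVCODEC_VERSION_MINOR,
               LIBAVCODEC_VERSION_MICRO,
               run >> 16, (run >> 8) & 0xff, run & 0xff);
        if ((run >> 16) != LIBAVCODEC_VERSION_MAJOR)
            fprintf(stderr, "warning: libavcodec major version mismatch\n");
        return 0;
    }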
+ */ + +#ifndef FF_API_REQUEST_CHANNELS +#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ALLOC_CONTEXT +#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_AVCODEC_OPEN +#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55) +#endif +#ifndef FF_API_OLD_DECODE_AUDIO +#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_TIMECODE +#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55) +#endif + +#ifndef FF_API_OLD_ENCODE_AUDIO +#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_ENCODE_VIDEO +#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CODEC_ID +#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_AVCODEC_RESAMPLE +#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DEINTERLACE +#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DESTRUCT_PACKET +#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_GET_BUFFER +#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_MISSING_SAMPLE +#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif + +#endif /* AVCODEC_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/xvmc.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/xvmc.h new file mode 100644 index 0000000..b2bf518 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavcodec/xvmc.h @@ -0,0 +1,168 @@ +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include + +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. + - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). 
+ A successful ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/avdevice.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/avdevice.h new file mode 100644 index 0000000..93a044f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/avdevice.h @@ -0,0 +1,69 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_AVDEVICE_H +#define AVDEVICE_AVDEVICE_H + +#include "version.h" + +/** + * @file + * @ingroup lavd + * Main libavdevice API header + */ + +/** + * @defgroup lavd Special devices muxing/demuxing library + * @{ + * Libavdevice is a complementary library to @ref libavf "libavformat". It + * provides various "special" platform-specific muxers and demuxers, e.g. for + * grabbing devices, audio capture and playback etc. As a consequence, the + * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own + * I/O functions). The filename passed to avformat_open_input() often does not + * refer to an actually existing file, but has some special device-specific + * meaning - e.g. for x11grab it is the display name. + * + * To use libavdevice, simply call avdevice_register_all() to register all + * compiled muxers and demuxers. They all use standard libavformat API. + * @} + */ + +#include "libavformat/avformat.h" + +/** + * Return the LIBAVDEVICE_VERSION_INT constant. + */ +unsigned avdevice_version(void); + +/** + * Return the libavdevice build-time configuration. + */ +const char *avdevice_configuration(void); + +/** + * Return the libavdevice license. + */ +const char *avdevice_license(void); + +/** + * Initialize libavdevice and register all the input and output devices. + * @warning This function is not thread safe. + */ +void avdevice_register_all(void); + +#endif /* AVDEVICE_AVDEVICE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/version.h new file mode 100644 index 0000000..4f4ec99 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavdevice/version.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVDEVICE_VERSION_H +#define AVDEVICE_VERSION_H + +/** + * @file + * @ingroup lavd + * Libavdevice version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVDEVICE_VERSION_MAJOR 55 +#define LIBAVDEVICE_VERSION_MINOR 5 +#define LIBAVDEVICE_VERSION_MICRO 100 + +#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \ + LIBAVDEVICE_VERSION_MINOR, \ + LIBAVDEVICE_VERSION_MICRO) +#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT + +#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#endif /* AVDEVICE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/asrc_abuffer.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/asrc_abuffer.h new file mode 100644 index 0000000..aa34461 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/asrc_abuffer.h @@ -0,0 +1,91 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_ASRC_ABUFFER_H +#define AVFILTER_ASRC_ABUFFER_H + +#include "avfilter.h" + +/** + * @file + * memory buffer source for audio + * + * @deprecated use buffersrc.h instead. + */ + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param data pointers to the samples planes + * @param linesize linesizes of each audio buffer plane + * @param nb_samples number of samples per channel + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param planar flag to indicate if audio data is planar or packed + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc, + uint8_t *data[8], int linesize[8], + int nb_samples, int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * This is similar to av_asrc_buffer_add_samples(), but the samples + * are stored in a buffer with known size. 
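For the libavdevice entry points shown a few lines up, typical use is just registration plus an optional version/configuration dump:

    #include <stdio.h>
    #include <libavdevice/avdevice.h>

    int main(void)
    {
        avdevice_register_all();   /* registers all compiled-in device (de)muxers */
        printf("libavdevice %u\nconfiguration: %s\n",
               avdevice_version(), avdevice_configuration());
        return 0;
    }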
+ * + * @param abuffersrc audio source buffer context + * @param buf pointer to the samples data, packed is assumed + * @param size the size in bytes of the buffer, it must contain an + * integer number of samples + * @param sample_fmt sample format of the audio data + * @param ch_layout channel layout of the audio data + * @param pts presentation timestamp of the audio buffer + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc, + uint8_t *buf, int buf_size, + int sample_rate, + int sample_fmt, int64_t ch_layout, int planar, + int64_t pts, int av_unused flags); + +/** + * Queue an audio buffer to the audio buffer source. + * + * @param abuffersrc audio source buffer context + * @param samplesref buffer ref to queue + * @param flags unused + * + * @deprecated use av_buffersrc_add_ref() instead. + */ +attribute_deprecated +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc, + AVFilterBufferRef *samplesref, + int av_unused flags); + +#endif /* AVFILTER_ASRC_ABUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avcodec.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avcodec.h new file mode 100644 index 0000000..8bbdad2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avcodec.h @@ -0,0 +1,110 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVCODEC_H +#define AVFILTER_AVCODEC_H + +/** + * @file + * libavcodec/libavfilter gluing utilities + * + * This should be included in an application ONLY if the installed + * libavfilter has been compiled with libavcodec support, otherwise + * symbols defined below will not be available. + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms); + + +/** + * Create and return a picref reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame, + int perms); + +/** + * Create and return a buffer reference from the data and properties + * contained in frame. + * + * @param perms permissions to assign to the new buffer reference + * @deprecated avfilter APIs work natively with AVFrame instead. 
+ */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, + const AVFrame *frame, + int perms); +#endif + +#if FF_API_FILL_FRAME +/** + * Fill an AVFrame with the information stored in samplesref. + * + * @param frame an already allocated AVFrame + * @param samplesref an audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *samplesref); + +/** + * Fill an AVFrame with the information stored in picref. + * + * @param frame an already allocated AVFrame + * @param picref a video buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *picref); + +/** + * Fill an AVFrame with information stored in ref. + * + * @param frame an already allocated AVFrame + * @param ref a video or audio buffer reference + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + * @deprecated Use avfilter_copy_buf_props() instead. + */ +attribute_deprecated +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame, + const AVFilterBufferRef *ref); +#endif + +#endif /* AVFILTER_AVCODEC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfilter.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfilter.h new file mode 100644 index 0000000..412b123 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfilter.h @@ -0,0 +1,1520 @@ +/* + * filter layer + * Copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTER_H +#define AVFILTER_AVFILTER_H + +/** + * @file + * @ingroup lavfi + * Main libavfilter public API header + */ + +/** + * @defgroup lavfi Libavfilter - graph-based frame editing library + * @{ + */ + +#include + +#include "libavutil/attributes.h" +#include "libavutil/avutil.h" +#include "libavutil/dict.h" +#include "libavutil/frame.h" +#include "libavutil/log.h" +#include "libavutil/samplefmt.h" +#include "libavutil/pixfmt.h" +#include "libavutil/rational.h" + +#include "libavfilter/version.h" + +/** + * Return the LIBAVFILTER_VERSION_INT constant. + */ +unsigned avfilter_version(void); + +/** + * Return the libavfilter build-time configuration. + */ +const char *avfilter_configuration(void); + +/** + * Return the libavfilter license. 
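The version/configuration/license helpers declared at the top of avfilter.h can be exercised directly, e.g.:

    #include <stdio.h>
    #include <libavfilter/avfilter.h>

    int main(void)
    {
        printf("libavfilter %u\nconfiguration: %s\nlicense: %s\n",
               avfilter_version(), avfilter_configuration(),
               avfilter_license());
        return 0;
    }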
+ */ +const char *avfilter_license(void); + +typedef struct AVFilterContext AVFilterContext; +typedef struct AVFilterLink AVFilterLink; +typedef struct AVFilterPad AVFilterPad; +typedef struct AVFilterFormats AVFilterFormats; + +#if FF_API_AVFILTERBUFFER +/** + * A reference-counted buffer data type used by the filter system. Filters + * should not store pointers to this structure directly, but instead use the + * AVFilterBufferRef structure below. + */ +typedef struct AVFilterBuffer { + uint8_t *data[8]; ///< buffer data for each plane/channel + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + /** private data to be used by a custom free function */ + void *priv; + /** + * A pointer to the function to deallocate this buffer if the default + * function is not sufficient. This could, for example, add the memory + * back into a memory pool to be reused later without the overhead of + * reallocating it from scratch. + */ + void (*free)(struct AVFilterBuffer *buf); + + int format; ///< media format + int w, h; ///< width and height of the allocated buffer + unsigned refcount; ///< number of references to this buffer +} AVFilterBuffer; + +#define AV_PERM_READ 0x01 ///< can read from the buffer +#define AV_PERM_WRITE 0x02 ///< can write to the buffer +#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer +#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time +#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time +#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes +#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned + +#define AVFILTER_ALIGN 16 //not part of ABI + +/** + * Audio specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, audio specific + * per reference properties must be separated out. + */ +typedef struct AVFilterBufferRefAudioProps { + uint64_t channel_layout; ///< channel layout of audio buffer + int nb_samples; ///< number of audio samples per channel + int sample_rate; ///< audio buffer sample rate + int channels; ///< number of channels (do not access directly) +} AVFilterBufferRefAudioProps; + +/** + * Video specific properties in a reference to an AVFilterBuffer. Since + * AVFilterBufferRef is common to different media formats, video specific + * per reference properties must be separated out. 
+ */ +typedef struct AVFilterBufferRefVideoProps { + int w; ///< image width + int h; ///< image height + AVRational sample_aspect_ratio; ///< sample aspect ratio + int interlaced; ///< is frame interlaced + int top_field_first; ///< field order + enum AVPictureType pict_type; ///< picture type of the frame + int key_frame; ///< 1 -> keyframe, 0-> not + int qp_table_linesize; ///< qp_table stride + int qp_table_size; ///< qp_table size + int8_t *qp_table; ///< array of Quantization Parameters +} AVFilterBufferRefVideoProps; + +/** + * A reference to an AVFilterBuffer. Since filters can manipulate the origin of + * a buffer to, for example, crop image without any memcpy, the buffer origin + * and dimensions are per-reference properties. Linesize is also useful for + * image flipping, frame to field filters, etc, and so is also per-reference. + * + * TODO: add anything necessary for frame reordering + */ +typedef struct AVFilterBufferRef { + AVFilterBuffer *buf; ///< the buffer that this is a reference to + uint8_t *data[8]; ///< picture/audio data for each plane + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data will always be set, but for planar + * audio with more channels that can fit in data, extended_data must be used + * in order to access all channels. + */ + uint8_t **extended_data; + int linesize[8]; ///< number of bytes per line + + AVFilterBufferRefVideoProps *video; ///< video buffer specific properties + AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties + + /** + * presentation timestamp. The time unit may change during + * filtering, as it is specified in the link and the filter code + * may need to rescale the PTS accordingly. + */ + int64_t pts; + int64_t pos; ///< byte position in stream, -1 if unknown + + int format; ///< media format + + int perms; ///< permissions, see the AV_PERM_* flags + + enum AVMediaType type; ///< media type of buffer data + + AVDictionary *metadata; ///< dictionary containing metadata key=value tags +} AVFilterBufferRef; + +/** + * Copy properties of src to dst, without copying the actual data + */ +attribute_deprecated +void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src); + +/** + * Add a new reference to a buffer. + * + * @param ref an existing reference to the buffer + * @param pmask a bitmask containing the allowable permissions in the new + * reference + * @return a new reference to the buffer with the same properties as the + * old, excluding any permissions denied by pmask + */ +attribute_deprecated +AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask); + +/** + * Remove a reference to a buffer. If this is the last reference to the + * buffer, the buffer itself is also automatically freed. + * + * @param ref reference to the buffer, may be NULL + * + * @note it is recommended to use avfilter_unref_bufferp() instead of this + * function + */ +attribute_deprecated +void avfilter_unref_buffer(AVFilterBufferRef *ref); + +/** + * Remove a reference to a buffer and set the pointer to NULL. + * If this is the last reference to the buffer, the buffer itself + * is also automatically freed. 
+ * + * @param ref pointer to the buffer reference + */ +attribute_deprecated +void avfilter_unref_bufferp(AVFilterBufferRef **ref); +#endif + +/** + * Get the number of channels of a buffer reference. + */ +attribute_deprecated +int avfilter_ref_get_channels(AVFilterBufferRef *ref); + +#if FF_API_AVFILTERPAD_PUBLIC +/** + * A filter pad used for either input or output. + * + * See doc/filter_design.txt for details on how to implement the methods. + * + * @warning this struct might be removed from public API. + * users should call avfilter_pad_get_name() and avfilter_pad_get_type() + * to access the name and type fields; there should be no need to access + * any other fields from outside of libavfilter. + */ +struct AVFilterPad { + /** + * Pad name. The name is unique among inputs and among outputs, but an + * input may have the same name as an output. This may be NULL if this + * pad has no need to ever be referenced by name. + */ + const char *name; + + /** + * AVFilterPad type. + */ + enum AVMediaType type; + + /** + * Input pads: + * Minimum required permissions on incoming buffers. Any buffer with + * insufficient permissions will be automatically copied by the filter + * system to a new buffer which provides the needed access permissions. + * + * Output pads: + * Guaranteed permissions on outgoing buffers. Any buffer pushed on the + * link must have at least these permissions; this fact is checked by + * asserts. It can be used to optimize buffer allocation. + */ + attribute_deprecated int min_perms; + + /** + * Input pads: + * Permissions which are not accepted on incoming buffers. Any buffer + * which has any of these permissions set will be automatically copied + * by the filter system to a new buffer which does not have those + * permissions. This can be used to easily disallow buffers with + * AV_PERM_REUSE. + * + * Output pads: + * Permissions which are automatically removed on outgoing buffers. It + * can be used to optimize buffer allocation. + */ + attribute_deprecated int rej_perms; + + /** + * @deprecated unused + */ + int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref); + + /** + * Callback function to get a video buffer. If NULL, the filter system will + * use ff_default_get_video_buffer(). + * + * Input video pads only. + */ + AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h); + + /** + * Callback function to get an audio buffer. If NULL, the filter system will + * use ff_default_get_audio_buffer(). + * + * Input audio pads only. + */ + AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples); + + /** + * @deprecated unused + */ + int (*end_frame)(AVFilterLink *link); + + /** + * @deprecated unused + */ + int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir); + + /** + * Filtering callback. This is where a filter receives a frame with + * audio/video data and should do its processing. + * + * Input pads only. + * + * @return >= 0 on success, a negative AVERROR on error. This function + * must ensure that frame is properly unreferenced on error if it + * hasn't been passed on to another filter. + */ + int (*filter_frame)(AVFilterLink *link, AVFrame *frame); + + /** + * Frame poll callback. This returns the number of immediately available + * samples. It should return a positive value if the next request_frame() + * is guaranteed to return one frame (with no delay). + * + * Defaults to just calling the source poll_frame() method. + * + * Output pads only. 
+ */ + int (*poll_frame)(AVFilterLink *link); + + /** + * Frame request callback. A call to this should result in at least one + * frame being output over the given link. This should return zero on + * success, and another value on error. + * See ff_request_frame() for the error codes with a specific + * meaning. + * + * Output pads only. + */ + int (*request_frame)(AVFilterLink *link); + + /** + * Link configuration callback. + * + * For output pads, this should set the following link properties: + * video: width, height, sample_aspect_ratio, time_base + * audio: sample_rate. + * + * This should NOT set properties such as format, channel_layout, etc which + * are negotiated between filters by the filter system using the + * query_formats() callback before this function is called. + * + * For input pads, this should check the properties of the link, and update + * the filter's internal state as necessary. + * + * For both input and output pads, this should return zero on success, + * and another value on error. + */ + int (*config_props)(AVFilterLink *link); + + /** + * The filter expects a fifo to be inserted on its input link, + * typically because it has a delay. + * + * input pads only. + */ + int needs_fifo; + + int needs_writable; +}; +#endif + +/** + * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g. + * AVFilter.inputs/outputs). + */ +int avfilter_pad_count(const AVFilterPad *pads); + +/** + * Get the name of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array it; is the caller's + * responsibility to ensure the index is valid + * + * @return name of the pad_idx'th pad in pads + */ +const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx); + +/** + * Get the type of an AVFilterPad. + * + * @param pads an array of AVFilterPads + * @param pad_idx index of the pad in the array; it is the caller's + * responsibility to ensure the index is valid + * + * @return type of the pad_idx'th pad in pads + */ +enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx); + +/** + * The number of the filter inputs is not determined just by AVFilter.inputs. + * The filter might add additional inputs during initialization depending on the + * options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0) +/** + * The number of the filter outputs is not determined just by AVFilter.outputs. + * The filter might add additional outputs during initialization depending on + * the options supplied to it. + */ +#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1) +/** + * The filter supports multithreading by splitting frames into multiple parts + * and processing them concurrently. + */ +#define AVFILTER_FLAG_SLICE_THREADS (1 << 2) +/** + * Some filters support a generic "enable" expression option that can be used + * to enable or disable a filter in the timeline. Filters supporting this + * option have this flag set. When the enable expression is false, the default + * no-op filter_frame() function is called in place of the filter_frame() + * callback defined on each input pad, thus the frame is passed unchanged to + * the next filters. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16) +/** + * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will + * have its filter_frame() callback(s) called as usual even when the enable + * expression is false. 
The filter will disable filtering within the + * filter_frame() callback(s) itself, for example executing code depending on + * the AVFilterContext->is_disabled value. + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17) +/** + * Handy mask to test whether the filter supports or no the timeline feature + * (internally or generically). + */ +#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL) + +/** + * Filter definition. This defines the pads a filter contains, and all the + * callback functions used to interact with the filter. + */ +typedef struct AVFilter { + /** + * Filter name. Must be non-NULL and unique among filters. + */ + const char *name; + + /** + * A description of the filter. May be NULL. + * + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *description; + + /** + * List of inputs, terminated by a zeroed element. + * + * NULL if there are no (static) inputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in + * this list. + */ + const AVFilterPad *inputs; + /** + * List of outputs, terminated by a zeroed element. + * + * NULL if there are no (static) outputs. Instances of filters with + * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in + * this list. + */ + const AVFilterPad *outputs; + + /** + * A class for the private data, used to declare filter private AVOptions. + * This field is NULL for filters that do not declare any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavfilter generic + * code to this class. + */ + const AVClass *priv_class; + + /** + * A combination of AVFILTER_FLAG_* + */ + int flags; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Filter initialization function. + * + * This callback will be called only once during the filter lifetime, after + * all the options have been set, but before links between filters are + * established and format negotiation is done. + * + * Basic filter initialization should be done here. Filters with dynamic + * inputs and/or outputs should create those inputs/outputs here based on + * provided options. No more changes to this filter's inputs/outputs can be + * done after this callback. + * + * This callback must not assume that the filter links exist or frame + * parameters are known. + * + * @ref AVFilter.uninit "uninit" is guaranteed to be called even if + * initialization fails, so this callback does not have to clean up on + * failure. + * + * @return 0 on success, a negative AVERROR on failure + */ + int (*init)(AVFilterContext *ctx); + + /** + * Should be set instead of @ref AVFilter.init "init" by the filters that + * want to pass a dictionary of AVOptions to nested contexts that are + * allocated during init. + * + * On return, the options dict should be freed and replaced with one that + * contains all the options which could not be processed by this filter (or + * with NULL if all the options were processed). + * + * Otherwise the semantics is the same as for @ref AVFilter.init "init". 
+ */ + int (*init_dict)(AVFilterContext *ctx, AVDictionary **options); + + /** + * Filter uninitialization function. + * + * Called only once right before the filter is freed. Should deallocate any + * memory held by the filter, release any buffer references, etc. It does + * not need to deallocate the AVFilterContext.priv memory itself. + * + * This callback may be called even if @ref AVFilter.init "init" was not + * called or failed, so it must be prepared to handle such a situation. + */ + void (*uninit)(AVFilterContext *ctx); + + /** + * Query formats supported by the filter on its inputs and outputs. + * + * This callback is called after the filter is initialized (so the inputs + * and outputs are fixed), shortly before the format negotiation. This + * callback may be called more than once. + * + * This callback must set AVFilterLink.out_formats on every input link and + * AVFilterLink.in_formats on every output link to a list of pixel/sample + * formats that the filter supports on that link. For audio links, this + * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" / + * @ref AVFilterLink.out_samplerates "out_samplerates" and + * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" / + * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously. + * + * This callback may be NULL for filters with one input, in which case + * libavfilter assumes that it supports all input formats and preserves + * them on output. + * + * @return zero on success, a negative value corresponding to an + * AVERROR code otherwise + */ + int (*query_formats)(AVFilterContext *); + + int priv_size; ///< size of private data to allocate for the filter + + /** + * Used by the filter registration system. Must not be touched by any other + * code. + */ + struct AVFilter *next; + + /** + * Make the filter instance process a command. + * + * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only + * @param arg the argument for the command + * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported. + * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be + * time consuming then a filter should treat it like an unsupported command + * + * @returns >=0 on success otherwise an error code. + * AVERROR(ENOSYS) on unsupported commands + */ + int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags); + + /** + * Filter initialization function, alternative to the init() + * callback. Args contains the user-supplied parameters, opaque is + * used for providing binary data. + */ + int (*init_opaque)(AVFilterContext *ctx, void *opaque); +} AVFilter; + +/** + * Process multiple parts of the frame concurrently. 
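+ *
+ * A minimal sketch of how a caller can allow or forbid this per filter via
+ * AVFilterContext.thread_type (graph, filter and "scale0" are placeholder
+ * names; not a complete program):
+ * @code
+ * AVFilterContext *ctx = avfilter_graph_alloc_filter(graph, filter, "scale0");
+ * ctx->thread_type = 0;                             // forbid all filter multithreading
+ * // or: ctx->thread_type = AVFILTER_THREAD_SLICE;  // allow slice threading only
+ * avfilter_init_str(ctx, NULL);
+ * @endcode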
+ */ +#define AVFILTER_THREAD_SLICE (1 << 0) + +typedef struct AVFilterInternal AVFilterInternal; + +/** An instance of a filter */ +struct AVFilterContext { + const AVClass *av_class; ///< needed for av_log() and filters common options + + const AVFilter *filter; ///< the AVFilter of which this is an instance + + char *name; ///< name of this filter instance + + AVFilterPad *input_pads; ///< array of input pads + AVFilterLink **inputs; ///< array of pointers to input links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs +#endif + unsigned nb_inputs; ///< number of input pads + + AVFilterPad *output_pads; ///< array of output pads + AVFilterLink **outputs; ///< array of pointers to output links +#if FF_API_FOO_COUNT + attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs +#endif + unsigned nb_outputs; ///< number of output pads + + void *priv; ///< private data for use by the filter + + struct AVFilterGraph *graph; ///< filtergraph this filter belongs to + + /** + * Type of multithreading being allowed/used. A combination of + * AVFILTER_THREAD_* flags. + * + * May be set by the caller before initializing the filter to forbid some + * or all kinds of multithreading for this filter. The default is allowing + * everything. + * + * When the filter is initialized, this field is combined using bit AND with + * AVFilterGraph.thread_type to get the final mask used for determining + * allowed threading types. I.e. a threading type needs to be set in both + * to be allowed. + * + * After the filter is initialzed, libavfilter sets this field to the + * threading type that is actually used (0 for no multithreading). + */ + int thread_type; + + /** + * An opaque struct for libavfilter internal use. + */ + AVFilterInternal *internal; + + struct AVFilterCommand *command_queue; + + char *enable_str; ///< enable expression string + void *enable; ///< parsed expression (AVExpr*) + double *var_values; ///< variable values for the enable expression + int is_disabled; ///< the enabled state from the last expression evaluation +}; + +/** + * A link between two filters. This contains pointers to the source and + * destination filters between which this link exists, and the indexes of + * the pads involved. In addition, this link also contains the parameters + * which have been negotiated and agreed upon between the filter, such as + * image dimensions, format, etc. + */ +struct AVFilterLink { + AVFilterContext *src; ///< source filter + AVFilterPad *srcpad; ///< output pad on the source filter + + AVFilterContext *dst; ///< dest filter + AVFilterPad *dstpad; ///< input pad on the dest filter + + enum AVMediaType type; ///< filter media type + + /* These parameters apply only to video */ + int w; ///< agreed upon image width + int h; ///< agreed upon image height + AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio + /* These parameters apply only to audio */ + uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h) + int sample_rate; ///< samples per second + + int format; ///< agreed upon media format + + /** + * Define the time base used by the PTS of the frames/samples + * which will pass through this link. + * During the configuration stage, each filter is supposed to + * change only the output timebase, while the timebase of the + * input link is assumed to be an unchangeable property. 
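+ *
+ * A minimal sketch of the intended pattern, assuming a hypothetical output
+ * pad config_props() callback named config_output():
+ * @code
+ * static int config_output(AVFilterLink *outlink)
+ * {
+ *     AVFilterLink *inlink = outlink->src->inputs[0];
+ *     outlink->time_base = inlink->time_base;   // only the output side is changed
+ *     return 0;
+ * }
+ * @endcode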
+ */ + AVRational time_base; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavfilter and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + /** + * Lists of formats and channel layouts supported by the input and output + * filters respectively. These lists are used for negotiating the format + * to actually be used, which will be loaded into the format and + * channel_layout members, above, when chosen. + * + */ + AVFilterFormats *in_formats; + AVFilterFormats *out_formats; + + /** + * Lists of channel layouts and sample rates used for automatic + * negotiation. + */ + AVFilterFormats *in_samplerates; + AVFilterFormats *out_samplerates; + struct AVFilterChannelLayouts *in_channel_layouts; + struct AVFilterChannelLayouts *out_channel_layouts; + + /** + * Audio only, the destination filter sets this to a non-zero value to + * request that buffers with the given number of samples should be sent to + * it. AVFilterPad.needs_fifo must also be set on the corresponding input + * pad. + * Last buffer before EOF will be padded with silence. + */ + int request_samples; + + /** stage of the initialization of the link properties (dimensions, etc) */ + enum { + AVLINK_UNINIT = 0, ///< not started + AVLINK_STARTINIT, ///< started, but incomplete + AVLINK_INIT ///< complete + } init_state; + + struct AVFilterPool *pool; + + /** + * Graph the filter belongs to. + */ + struct AVFilterGraph *graph; + + /** + * Current timestamp of the link, as defined by the most recent + * frame(s), in AV_TIME_BASE units. + */ + int64_t current_pts; + + /** + * Index in the age array. + */ + int age_index; + + /** + * Frame rate of the stream on the link, or 1/0 if unknown; + * if left to 0/0, will be automatically be copied from the first input + * of the source filter if it exists. + * + * Sources should set it to the best estimation of the real frame rate. + * Filters should update it if necessary depending on their function. + * Sinks can use it to set a default output frame rate. + * It is similar to the r_frame_rate field in AVStream. + */ + AVRational frame_rate; + + /** + * Buffer partially filled with samples to achieve a fixed/minimum size. + */ + AVFrame *partial_buf; + + /** + * Size of the partial buffer to allocate. + * Must be between min_samples and max_samples. + */ + int partial_buf_size; + + /** + * Minimum number of samples to filter at once. If filter_frame() is + * called with fewer samples, it will accumulate them in partial_buf. + * This field and the related ones must not be changed after filtering + * has started. + * If 0, all related fields are ignored. + */ + int min_samples; + + /** + * Maximum number of samples to filter at once. If filter_frame() is + * called with more samples, it will split them. + */ + int max_samples; + + /** + * The buffer reference currently being received across the link by the + * destination filter. This is used internally by the filter system to + * allow automatic copying of buffers which do not have sufficient + * permissions for the destination. This should not be accessed directly + * by the filters. + */ + AVFilterBufferRef *cur_buf_copy; + + /** + * True if the link is closed. 
+ * If set, all attempts at start_frame, filter_frame or request_frame
+ * will fail with AVERROR_EOF, and if necessary the reference will be
+ * destroyed.
+ * If request_frame returns AVERROR_EOF, this flag is set on the
+ * corresponding link.
+ * It can also be set by either the source or the destination
+ * filter.
+ */
+ int closed;
+
+ /**
+ * Number of channels.
+ */
+ int channels;
+
+ /**
+ * True if a frame is being requested on the link.
+ * Used internally by the framework.
+ */
+ unsigned frame_requested;
+
+ /**
+ * Link processing flags.
+ */
+ unsigned flags;
+
+ /**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count;
+};
+
+/**
+ * Link two filters together.
+ *
+ * @param src the source filter
+ * @param srcpad index of the output pad on the source filter
+ * @param dst the destination filter
+ * @param dstpad index of the input pad on the destination filter
+ * @return zero on success
+ */
+int avfilter_link(AVFilterContext *src, unsigned srcpad,
+ AVFilterContext *dst, unsigned dstpad);
+
+/**
+ * Free the link in *link, and set its pointer to NULL.
+ */
+void avfilter_link_free(AVFilterLink **link);
+
+/**
+ * Get the number of channels of a link.
+ */
+int avfilter_link_get_channels(AVFilterLink *link);
+
+/**
+ * Set the closed field of a link.
+ */
+void avfilter_link_set_closed(AVFilterLink *link, int closed);
+
+/**
+ * Negotiate the media format, dimensions, etc. of all inputs to a filter.
+ *
+ * @param filter the filter to negotiate the properties for its inputs
+ * @return zero on successful negotiation
+ */
+int avfilter_config_links(AVFilterContext *filter);
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Create a buffer reference wrapped around an already allocated image
+ * buffer.
+ *
+ * @param data pointers to the planes of the image to reference
+ * @param linesize linesizes for the planes of the image to reference
+ * @param perms the required access permissions
+ * @param w the width of the image specified by the data and linesize arrays
+ * @param h the height of the image specified by the data and linesize arrays
+ * @param format the pixel format of the image specified by the data and linesize arrays
+ */
+attribute_deprecated
+AVFilterBufferRef *
+avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
+ int w, int h, enum AVPixelFormat format);
+
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ *
+ * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
+ * that can handle unknown channel layouts.
+ *
+ * @param data pointers to the samples plane buffers
+ * @param linesize linesize for the samples plane buffers
+ * @param perms the required access permissions
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the format of each sample in the buffer to allocate
+ * @param channel_layout the channel layout of the buffer
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
+ int linesize,
+ int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ uint64_t channel_layout);
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ * + * @param data pointers to the samples plane buffers + * @param linesize linesize for the samples plane buffers + * @param perms the required access permissions + * @param nb_samples number of samples per channel + * @param sample_fmt the format of each sample in the buffer to allocate + * @param channels the number of channels of the buffer + * @param channel_layout the channel layout of the buffer, + * must be either 0 or consistent with channels + */ +attribute_deprecated +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data, + int linesize, + int perms, + int nb_samples, + enum AVSampleFormat sample_fmt, + int channels, + uint64_t channel_layout); + +#endif + + +#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically +#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw) + +/** + * Make the filter instance process a command. + * It is recommended to use avfilter_graph_send_command(). + */ +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags); + +/** Initialize the filter system. Register all builtin filters. */ +void avfilter_register_all(void); + +#if FF_API_OLD_FILTER_REGISTER +/** Uninitialize the filter system. Unregister all filters. */ +attribute_deprecated +void avfilter_uninit(void); +#endif + +/** + * Register a filter. This is only needed if you plan to use + * avfilter_get_by_name later to lookup the AVFilter structure by name. A + * filter can still by instantiated with avfilter_graph_alloc_filter even if it + * is not registered. + * + * @param filter the filter to register + * @return 0 if the registration was successful, a negative value + * otherwise + */ +int avfilter_register(AVFilter *filter); + +/** + * Get a filter definition matching the given name. + * + * @param name the filter name to find + * @return the filter definition, if any matching one is registered. + * NULL if none found. + */ +AVFilter *avfilter_get_by_name(const char *name); + +/** + * Iterate over all registered filters. + * @return If prev is non-NULL, next registered filter after prev or NULL if + * prev is the last filter. If prev is NULL, return the first registered filter. + */ +const AVFilter *avfilter_next(const AVFilter *prev); + +#if FF_API_OLD_FILTER_REGISTER +/** + * If filter is NULL, returns a pointer to the first registered filter pointer, + * if filter is non-NULL, returns the next pointer after filter. + * If the returned pointer points to NULL, the last registered filter + * was already reached. + * @deprecated use avfilter_next() + */ +attribute_deprecated +AVFilter **av_filter_next(AVFilter **filter); +#endif + +#if FF_API_AVFILTER_OPEN +/** + * Create a filter instance. + * + * @param filter_ctx put here a pointer to the created filter context + * on success, NULL on failure + * @param filter the filter to create an instance of + * @param inst_name Name to give to the new instance. Can be NULL for none. + * @return >= 0 in case of success, a negative error code otherwise + * @deprecated use avfilter_graph_alloc_filter() instead + */ +attribute_deprecated +int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name); +#endif + + +#if FF_API_AVFILTER_INIT_FILTER +/** + * Initialize a filter. 
+ * + * @param filter the filter to initialize + * @param args A string of parameters to use when initializing the filter. + * The format and meaning of this string varies by filter. + * @param opaque Any extra non-string data needed by the filter. The meaning + * of this parameter varies by filter. + * @return zero on success + */ +attribute_deprecated +int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque); +#endif + +/** + * Initialize a filter with the supplied parameters. + * + * @param ctx uninitialized filter context to initialize + * @param args Options to initialize the filter with. This must be a + * ':'-separated list of options in the 'key=value' form. + * May be NULL if the options have been set directly using the + * AVOptions API or there are no options that need to be set. + * @return 0 on success, a negative AVERROR on failure + */ +int avfilter_init_str(AVFilterContext *ctx, const char *args); + +/** + * Initialize a filter with the supplied dictionary of options. + * + * @param ctx uninitialized filter context to initialize + * @param options An AVDictionary filled with options for this filter. On + * return this parameter will be destroyed and replaced with + * a dict containing options that were not found. This dictionary + * must be freed by the caller. + * May be NULL, then this function is equivalent to + * avfilter_init_str() with the second parameter set to NULL. + * @return 0 on success, a negative AVERROR on failure + * + * @note This function and avfilter_init_str() do essentially the same thing, + * the difference is in manner in which the options are passed. It is up to the + * calling code to choose whichever is more preferable. The two functions also + * behave differently when some of the provided options are not declared as + * supported by the filter. In such a case, avfilter_init_str() will fail, but + * this function will leave those extra options in the options AVDictionary and + * continue as usual. + */ +int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options); + +/** + * Free a filter context. This will also remove the filter from its + * filtergraph's list of filters. + * + * @param filter the filter to free + */ +void avfilter_free(AVFilterContext *filter); + +/** + * Insert a filter in the middle of an existing link. + * + * @param link the link into which the filter should be inserted + * @param filt the filter to be inserted + * @param filt_srcpad_idx the input pad on the filter to connect + * @param filt_dstpad_idx the output pad on the filter to connect + * @return zero on success + */ +int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt, + unsigned filt_srcpad_idx, unsigned filt_dstpad_idx); + +#if FF_API_AVFILTERBUFFER +/** + * Copy the frame properties of src to dst, without copying the actual + * image data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src); + +/** + * Copy the frame properties and data pointers of src to dst, without copying + * the actual data. + * + * @return 0 on success, a negative number on error. + */ +attribute_deprecated +int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src); +#endif + +/** + * @return AVClass for AVFilterContext. + * + * @see av_opt_find(). 
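+ *
+ * One possible use, sketched here for illustration (requires libavutil/opt.h
+ * and stdio.h), is enumerating the generic AVFilterContext options:
+ * @code
+ * const AVClass *cls = avfilter_get_class();
+ * const AVOption *opt = NULL;
+ * while ((opt = av_opt_next(&cls, opt)))
+ *     printf("generic AVFilterContext option: %s\n", opt->name);
+ * @endcode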
+ */ +const AVClass *avfilter_get_class(void); + +typedef struct AVFilterGraphInternal AVFilterGraphInternal; + +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. + * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. + * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + +typedef struct AVFilterGraph { + const AVClass *av_class; +#if FF_API_FOO_COUNT + attribute_deprecated + unsigned filter_count_unused; +#endif + AVFilterContext **filters; +#if !FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters + char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters +#if FF_API_FOO_COUNT + unsigned nb_filters; +#endif + + /** + * Type of multithreading allowed for filters in this graph. A combination + * of AVFILTER_THREAD_* flags. + * + * May be set by the caller at any point, the setting will apply to all + * filters initialized after that. The default is allowing everything. + * + * When a filter in this graph is initialized, this field is combined using + * bit AND with AVFilterContext.thread_type to get the final mask used for + * determining allowed threading types. I.e. a threading type needs to be + * set in both to be allowed. + */ + int thread_type; + + /** + * Maximum number of threads used by filters in this graph. May be set by + * the caller before adding any filters to the filtergraph. Zero (the + * default) means that the number of threads is determined automatically. + */ + int nb_threads; + + /** + * Opaque object for libavfilter internal use. + */ + AVFilterGraphInternal *internal; + + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. + */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. + */ + avfilter_execute_func *execute; + + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions + + /** + * Private fields + * + * The following fields are for internal use only. 
+ * Their type, offset, number and semantic can change without notice. + */ + + AVFilterLink **sink_links; + int sink_links_count; + + unsigned disable_auto_convert; +} AVFilterGraph; + +/** + * Allocate a filter graph. + */ +AVFilterGraph *avfilter_graph_alloc(void); + +/** + * Create a new filter instance in a filter graph. + * + * @param graph graph in which the new filter will be used + * @param filter the filter to create an instance of + * @param name Name to give to the new instance (will be copied to + * AVFilterContext.name). This may be used by the caller to identify + * different filters, libavfilter itself assigns no semantics to + * this parameter. May be NULL. + * + * @return the context of the newly created filter instance (note that it is + * also retrievable directly through AVFilterGraph.filters or with + * avfilter_graph_get_filter()) on success or NULL or failure. + */ +AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph, + const AVFilter *filter, + const char *name); + +/** + * Get a filter instance with name name from graph. + * + * @return the pointer to the found filter instance or NULL if it + * cannot be found. + */ +AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name); + +#if FF_API_AVFILTER_OPEN +/** + * Add an existing filter instance to a filter graph. + * + * @param graphctx the filter graph + * @param filter the filter to be added + * + * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a + * filter graph + */ +attribute_deprecated +int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter); +#endif + +/** + * Create and add a filter instance into an existing graph. + * The filter instance is created from the filter filt and inited + * with the parameters args and opaque. + * + * In case of success put in *filt_ctx the pointer to the created + * filter instance, otherwise set *filt_ctx to NULL. + * + * @param name the instance name to give to the created filter instance + * @param graph_ctx the filter graph + * @return a negative AVERROR error code in case of failure, a non + * negative value otherwise + */ +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, + const char *name, const char *args, void *opaque, + AVFilterGraph *graph_ctx); + +/** + * Enable or disable automatic format conversion inside the graph. + * + * Note that format conversion can still happen inside explicitly inserted + * scale and aresample filters. + * + * @param flags any of the AVFILTER_AUTO_CONVERT_* constants + */ +void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags); + +enum { + AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */ + AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */ +}; + +/** + * Check validity and configure all the links and formats in the graph. + * + * @param graphctx the filter graph + * @param log_ctx context used for logging + * @return >= 0 in case of success, a negative AVERROR code otherwise + */ +int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); + +/** + * Free a graph, destroy its links, and set *graph to NULL. + * If *graph is NULL, do nothing. + */ +void avfilter_graph_free(AVFilterGraph **graph); + +/** + * A linked-list of the inputs/outputs of the filter chain. + * + * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(), + * where it is used to communicate open (unlinked) inputs and outputs from and + * to the caller. 
+ * This struct specifies, for each unconnected pad contained in the graph, the
+ * filter context and the pad index required for establishing a link.
+ */
+typedef struct AVFilterInOut {
+ /** unique name for this input/output in the list */
+ char *name;
+
+ /** filter context associated with this input/output */
+ AVFilterContext *filter_ctx;
+
+ /** index of the filt_ctx pad to use for linking */
+ int pad_idx;
+
+ /** next input/output in the list, NULL if this is the last */
+ struct AVFilterInOut *next;
+} AVFilterInOut;
+
+/**
+ * Allocate a single AVFilterInOut entry.
+ * Must be freed with avfilter_inout_free().
+ * @return allocated AVFilterInOut on success, NULL on failure.
+ */
+AVFilterInOut *avfilter_inout_alloc(void);
+
+/**
+ * Free the supplied list of AVFilterInOut and set *inout to NULL.
+ * If *inout is NULL, do nothing.
+ */
+void avfilter_inout_free(AVFilterInOut **inout);
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @note The caller must provide the lists of inputs and outputs,
+ * which therefore must be known before calling the function.
+ *
+ * @note The inputs parameter describes inputs of the already existing
+ * part of the graph; i.e. from the point of view of the newly created
+ * part, they are outputs. Similarly the outputs parameter describes
+ * outputs of the already existing filters, which are provided as
+ * inputs to the parsed filters.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs linked list to the inputs of the graph
+ * @param outputs linked list to the outputs of the graph
+ * @return zero on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut *inputs, AVFilterInOut *outputs,
+ void *log_ctx);
+#else
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ * @deprecated Use avfilter_graph_parse_ptr() instead.
+ */
+attribute_deprecated
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+#endif
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param[in] graph the filter graph where to link the parsed graph context
+ * @param[in] filters string to be parsed
+ * @param[out] inputs a linked list of all free (unlinked) inputs of the
+ * parsed graph will be returned here. It is to be freed
+ * by the caller using avfilter_inout_free().
+ * @param[out] outputs a linked list of all free (unlinked) outputs of the
+ * parsed graph will be returned here. It is to be freed by the
+ * caller using avfilter_inout_free().
+ * @return zero on success, a negative AVERROR code on error
+ *
+ * @note This function returns the inputs and outputs that are left
+ * unlinked after parsing the graph and the caller then deals with
+ * them.
+ * @note This function makes no reference whatsoever to already
+ * existing parts of the graph and the inputs parameter will on return
+ * contain inputs of the newly parsed part of the graph. Analogously
+ * the outputs parameter will contain outputs of the newly created
+ * filters.
+ */
+int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs,
+ AVFilterInOut **outputs);
+
+/**
+ * Send a command to one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_len where the filter(s) can return a response.
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/**
+ * Queue a command for one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param ts time at which the command should be sent to the filter
+ *
+ * @note As this executes commands after this function returns, no return code
+ * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
+ */
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
+
+
+/**
+ * Dump a graph into a human-readable string representation.
+ *
+ * @param graph the graph to dump
+ * @param options formatting options; currently ignored
+ * @return a string, or NULL in case of memory allocation failure;
+ * the string must be freed using av_free
+ */
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
+
+/**
+ * Request a frame on the oldest sink link.
+ *
+ * If the request returns AVERROR_EOF, try the next.
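+ *
+ * A minimal drain-loop sketch (graph stands for an already configured
+ * AVFilterGraph; error handling is abbreviated):
+ * @code
+ * int ret;
+ * while ((ret = avfilter_graph_request_oldest(graph)) >= 0)
+ *     ;                      // frames are pushed towards the sink filters
+ * if (ret != AVERROR_EOF)
+ *     return ret;            // a real error occurred
+ * @endcode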
+ * + * Note that this function is not meant to be the sole scheduling mechanism + * of a filtergraph, only a convenience function to help drain a filtergraph + * in a balanced way under normal circumstances. + * + * Also note that AVERROR_EOF does not mean that frames did not arrive on + * some of the sinks during the process. + * When there are multiple sink links, in case the requested link + * returns an EOF, this may cause a filter to flush pending frames + * which are sent to another sink link, although unrequested. + * + * @return the return value of ff_request_frame(), + * or AVERROR_EOF if all links returned AVERROR_EOF + */ +int avfilter_graph_request_oldest(AVFilterGraph *graph); + +/** + * @} + */ + +#endif /* AVFILTER_AVFILTER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfiltergraph.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfiltergraph.h new file mode 100644 index 0000000..b31d581 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/avfiltergraph.h @@ -0,0 +1,28 @@ +/* + * Filter graphs + * copyright (c) 2007 Bobby Bingham + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_AVFILTERGRAPH_H +#define AVFILTER_AVFILTERGRAPH_H + +#include "avfilter.h" +#include "libavutil/log.h" + +#endif /* AVFILTER_AVFILTERGRAPH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersink.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersink.h new file mode 100644 index 0000000..ce96d08 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersink.h @@ -0,0 +1,186 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSINK_H +#define AVFILTER_BUFFERSINK_H + +/** + * @file + * memory buffer sink API for audio and video + */ + +#include "avfilter.h" + +#if FF_API_AVFILTERBUFFER +/** + * Get an audio/video buffer data from buffer_sink and put it in bufref. + * + * This function works with both audio and video buffer sinks. 
+ *
+ * @param buffer_sink pointer to a buffersink or abuffersink context
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure
+ */
+attribute_deprecated
+int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
+ AVFilterBufferRef **bufref, int flags);
+
+/**
+ * Get the number of immediately available frames.
+ */
+attribute_deprecated
+int av_buffersink_poll_frame(AVFilterContext *ctx);
+
+/**
+ * Get a buffer with filtered data from sink and put it in buf.
+ *
+ * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
+ * @param buf the buffer reference will be written here if buf is non-NULL. buf
+ * must be freed by the caller using avfilter_unref_buffer().
+ * Buf may also be NULL to query whether a buffer is ready to be
+ * output.
+ *
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure.
+ */
+attribute_deprecated
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
+
+/**
+ * Same as av_buffersink_read, but with the ability to specify the number of
+ * samples read. This function is less efficient than av_buffersink_read(),
+ * because it copies the data around.
+ *
+ * @param ctx pointer to a context of the abuffersink AVFilter.
+ * @param buf the buffer reference will be written here if buf is non-NULL. buf
+ * must be freed by the caller using avfilter_unref_buffer(). buf
+ * will contain exactly nb_samples audio samples, except at the end
+ * of stream, when it can contain fewer than nb_samples.
+ * Buf may also be NULL to query whether a buffer is ready to be
+ * output.
+ *
+ * @warning do not mix this function with av_buffersink_read(). Use only one or
+ * the other with a single sink, not both.
+ */
+attribute_deprecated
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
+ int nb_samples);
+#endif
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
+ * @param ctx pointer to a buffersink or abuffersink filter context.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ *
+ * @return >= 0 in case of success, a negative AVERROR code for failure.
+ */
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
+
+/**
+ * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
+ * reference, but not to remove it from the buffer. This is useful if you
+ * only need to read a video/samples buffer, without removing it from the sink.
+ */
+#define AV_BUFFERSINK_FLAG_PEEK 1
+
+/**
+ * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
+ * If a frame is already buffered, it is read (and removed from the buffer),
+ * but if no frame is present, return AVERROR(EAGAIN).
+ */
+#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
+
+/**
+ * Struct to use for initializing a buffersink context.
+ */
+typedef struct {
+ const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
+} AVBufferSinkParams;
+
+/**
+ * Create an AVBufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVBufferSinkParams *av_buffersink_params_alloc(void);
+
+/**
+ * Struct to use for initializing an abuffersink context.
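+ *
+ * A sketch of typical use (graph, sink_ctx and ret are placeholders; the
+ * sample format list is chosen only for illustration):
+ * @code
+ * static const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
+ * AVABufferSinkParams *params = av_abuffersink_params_alloc();
+ * params->sample_fmts = fmts;
+ * ret = avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("abuffersink"),
+ *                                    "out", NULL, params, graph);
+ * av_free(params);
+ * @endcode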
+ */ +typedef struct { + const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE + const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1 + const int *channel_counts; ///< list of allowed channel counts, terminated by -1 + int all_channel_counts; ///< if not 0, accept any channel count or layout + int *sample_rates; ///< list of allowed sample rates, terminated by -1 +} AVABufferSinkParams; + +/** + * Create an AVABufferSinkParams structure. + * + * Must be freed with av_free(). + */ +AVABufferSinkParams *av_abuffersink_params_alloc(void); + +/** + * Set the frame size for an audio buffer sink. + * + * All calls to av_buffersink_get_buffer_ref will return a buffer with + * exactly the specified number of samples, or AVERROR(EAGAIN) if there is + * not enough. The last buffer at EOF will be padded with 0. + */ +void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size); + +/** + * Get the frame rate of the input. + */ +AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); + +/** + * Get a frame with filtered data from sink and put it in frame. + * + * @param ctx pointer to a context of a buffersink or abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure. + */ +int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Same as av_buffersink_get_frame(), but with the ability to specify the number + * of samples read. This function is less efficient than + * av_buffersink_get_frame(), because it copies the data around. + * + * @param ctx pointer to a context of the abuffersink AVFilter. + * @param frame pointer to an allocated frame that will be filled with data. + * The data must be freed using av_frame_unref() / av_frame_free() + * frame will contain exactly nb_samples audio samples, except at + * the end of stream, when it can contain less than nb_samples. + * + * @warning do not mix this function with av_buffersink_get_frame(). Use only one or + * the other with a single sink, not both. + */ +int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); + +#endif /* AVFILTER_BUFFERSINK_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersrc.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersrc.h new file mode 100644 index 0000000..89613e1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/buffersrc.h @@ -0,0 +1,148 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_BUFFERSRC_H +#define AVFILTER_BUFFERSRC_H + +/** + * @file + * Memory buffer source API. + */ + +#include "libavcodec/avcodec.h" +#include "avfilter.h" + +enum { + + /** + * Do not check for format changes. + */ + AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1, + +#if FF_API_AVFILTERBUFFER + /** + * Ignored + */ + AV_BUFFERSRC_FLAG_NO_COPY = 2, +#endif + + /** + * Immediately push the frame to the output. + */ + AV_BUFFERSRC_FLAG_PUSH = 4, + + /** + * Keep a reference to the frame. + * If the frame if reference-counted, create a new reference; otherwise + * copy the frame data. + */ + AV_BUFFERSRC_FLAG_KEEP_REF = 8, + +}; + +/** + * Add buffer data in picref to buffer_src. + * + * @param buffer_src pointer to a buffer source context + * @param picref a buffer reference, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_ref(AVFilterContext *buffer_src, + AVFilterBufferRef *picref, int flags); + +/** + * Get the number of failed requests. + * + * A failed request is when the request_frame method is called while no + * frame is present in the buffer. + * The number is reset when a frame is added. + */ +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); + +#if FF_API_AVFILTERBUFFER +/** + * Add a buffer to the filtergraph s. + * + * @param buf buffer containing frame data to be passed down the filtergraph. + * This function will take ownership of buf, the user must not free it. + * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. + * + * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() + */ +attribute_deprecated +int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +#endif + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will make a new reference to it. Otherwise the frame data will be + * copied. + * + * @return 0 on success, a negative AVERROR on error + * + * This function is equivalent to av_buffersrc_add_frame_flags() with the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); + +/** + * Add a frame to the buffer source. + * + * @param s an instance of the buffersrc filter. + * @param frame frame to be added. If the frame is reference counted, this + * function will take ownership of the reference(s) and reset the frame. + * Otherwise the frame data will be copied. If this function returns an error, + * the input frame is not touched. + * + * @return 0 on success, a negative AVERROR on error. + * + * @note the difference between this function and av_buffersrc_write_frame() is + * that av_buffersrc_write_frame() creates a new reference to the input frame, + * while this function takes ownership of the reference passed to it. + * + * This function is equivalent to av_buffersrc_add_frame_flags() without the + * AV_BUFFERSRC_FLAG_KEEP_REF flag. + */ +int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame); + +/** + * Add a frame to the buffer source. 
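+ *
+ * A minimal feeding sketch (src_ctx and frame are placeholders; with
+ * AV_BUFFERSRC_FLAG_KEEP_REF the caller keeps ownership of frame):
+ * @code
+ * if (av_buffersrc_add_frame_flags(src_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
+ *     goto fail;
+ * // ... once the input is exhausted, signal EOF:
+ * av_buffersrc_add_frame_flags(src_ctx, NULL, 0);
+ * @endcode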
+ * + * By default, if the frame is reference-counted, this function will take + * ownership of the reference(s) and reset the frame. This can be controled + * using the flags. + * + * If this function returns an error, the input frame is not touched. + * + * @param buffer_src pointer to a buffer source context + * @param frame a frame, or NULL to mark EOF + * @param flags a combination of AV_BUFFERSRC_FLAG_* + * @return >= 0 in case of success, a negative AVERROR code + * in case of failure + */ +int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, + AVFrame *frame, int flags); + + +#endif /* AVFILTER_BUFFERSRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/version.h new file mode 100644 index 0000000..541b869 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavfilter/version.h @@ -0,0 +1,89 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFILTER_VERSION_H +#define AVFILTER_VERSION_H + +/** + * @file + * @ingroup lavfi + * Libavfilter version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFILTER_VERSION_MAJOR 3 +#define LIBAVFILTER_VERSION_MINOR 90 +#define LIBAVFILTER_VERSION_MICRO 100 + +#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \ + LIBAVFILTER_VERSION_MINOR, \ + LIBAVFILTER_VERSION_MICRO) +#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT + +#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
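+ *
+ * A sketch of the usual consumption pattern in calling code, guarding use of
+ * an API scheduled for removal (purely illustrative):
+ * @code
+ * #if FF_API_AVFILTERBUFFER
+ *     // the deprecated AVFilterBufferRef code path still compiles here
+ * #else
+ *     // rely exclusively on the AVFrame-based API
+ * #endif
+ * @endcode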
+ */ + +#ifndef FF_API_AVFILTERPAD_PUBLIC +#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FOO_COUNT +#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_FILL_FRAME +#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_BUFFERSRC_BUFFER +#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTERBUFFER +#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_OPTS +#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_ACONVERT_FILTER +#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_OPEN +#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_AVFILTER_INIT_FILTER +#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_FILTER_REGISTER +#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_OLD_GRAPH_PARSE +#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif +#ifndef FF_API_DRAWTEXT_OLD_TIMELINE +#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 4) +#endif + +#endif /* AVFILTER_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avformat.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avformat.h new file mode 100644 index 0000000..4e5683c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avformat.h @@ -0,0 +1,2239 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf I/O and Muxing/Demuxing Library + * @{ + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the av_iformat_next() / + * av_oformat_next() functions. 
The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with av_malloc(). To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). + * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * Lavf allows to configure muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL or filename, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). 
One such case is when you want to use custom functions
+ * for reading input data instead of lavf's internal I/O layer.
+ * To do that, create your own AVIOContext with avio_alloc_context(), passing
+ * your reading callbacks to it. Then set the @em pb field of your
+ * AVFormatContext to the newly created AVIOContext.
+ *
+ * Since the format of the opened file is in general not known until after
+ * avformat_open_input() has returned, it is not possible to set demuxer private
+ * options on a preallocated context. Instead, the options should be passed to
+ * avformat_open_input() wrapped in an AVDictionary:
+ * @code
+ * AVDictionary *options = NULL;
+ * av_dict_set(&options, "video_size", "640x480", 0);
+ * av_dict_set(&options, "pixel_format", "rgb24", 0);
+ *
+ * if (avformat_open_input(&s, url, NULL, &options) < 0)
+ *     abort();
+ * av_dict_free(&options);
+ * @endcode
+ * This code passes the private options 'video_size' and 'pixel_format' to the
+ * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it
+ * cannot know how to interpret raw video data otherwise. If the format turns
+ * out to be something other than raw video, those options will not be
+ * recognized by the demuxer and therefore will not be applied. Such unrecognized
+ * options are then returned in the options dictionary (recognized options are
+ * consumed). The calling program can handle such unrecognized options as it
+ * wishes, e.g.
+ * @code
+ * AVDictionaryEntry *e;
+ * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
+ *     fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
+ *     abort();
+ * }
+ * @endcode
+ *
+ * After you have finished reading the file, you must close it with
+ * avformat_close_input(). It will free everything associated with the file.
+ *
+ * @section lavf_decoding_read Reading from an opened file
+ * Reading data from an opened AVFormatContext is done by repeatedly calling
+ * av_read_frame() on it. Each call, if successful, will return an AVPacket
+ * containing encoded data for one AVStream, identified by
+ * AVPacket.stream_index. This packet may be passed straight into the libavcodec
+ * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or
+ * avcodec_decode_subtitle2() if the caller wishes to decode the data.
+ *
+ * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be
+ * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for
+ * pts/dts, 0 for duration) if the stream does not provide them. The timing
+ * information will be in AVStream.time_base units, i.e. they have to be
+ * multiplied by the time base to convert them to seconds.
+ *
+ * If AVPacket.buf is set on the returned packet, then the packet is
+ * allocated dynamically and the user may keep it indefinitely.
+ * Otherwise, if AVPacket.buf is NULL, the packet data is backed by
+ * static storage somewhere inside the demuxer and the packet is only valid
+ * until the next av_read_frame() call or closing the file. If the caller
+ * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy
+ * of it.
+ * In both cases, the packet must be freed with av_free_packet() when it is no
+ * longer needed.
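+ *
+ * As a minimal illustrative sketch (reusing the AVFormatContext s opened in
+ * the example above, and omitting most error handling), a complete read loop
+ * could look like this:
+ * @code
+ * AVPacket pkt;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;
+ * pkt.size = 0;
+ *
+ * while (av_read_frame(s, &pkt) >= 0) {
+ *     /* pkt.stream_index identifies the AVStream this packet belongs to;
+ *      * hand the packet to the matching decoder here if desired. */
+ *     av_free_packet(&pkt);
+ * }
+ *
+ * avformat_close_input(&s);
+ * @endcode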
+ *
+ * @section lavf_decoding_seek Seeking
+ * @}
+ *
+ * @defgroup lavf_encoding Muxing
+ * @{
+ * @}
+ *
+ * @defgroup lavf_io I/O Read/Write
+ * @{
+ * @}
+ *
+ * @defgroup lavf_codec Demuxers
+ * @{
+ * @defgroup lavf_codec_native Native Demuxers
+ * @{
+ * @}
+ * @defgroup lavf_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @}
+ * @defgroup lavf_protos I/O Protocols
+ * @{
+ * @}
+ * @defgroup lavf_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+#include <time.h>
+#include <stdio.h> /* FILE */
+#include "libavcodec/avcodec.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "avio.h"
+#include "libavformat/version.h"
+
+struct AVFormatContext;
+
+
+/**
+ * @defgroup metadata_api Public Metadata API
+ * @{
+ * @ingroup libavf
+ * The metadata API allows libavformat to export metadata tags to a client
+ * application when demuxing. Conversely it allows a client application to
+ * set metadata when muxing.
+ *
+ * Metadata is exported or set as pairs of key/value strings in the 'metadata'
+ * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs
+ * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg,
+ * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata
+ * exported by demuxers isn't checked to be valid UTF-8 in most cases.
+ *
+ * Important concepts to keep in mind:
+ * - Keys are unique; there can never be 2 tags with the same key. This is
+ * also meant semantically, i.e., a demuxer should not knowingly produce
+ * several keys that are literally different but semantically identical.
+ * E.g., key=Author5, key=Author6. In this example, all authors must be
+ * placed in the same tag.
+ * - Metadata is flat, not hierarchical; there are no subtags. If you
+ * want to store, e.g., the email address of the child of producer Alice
+ * and actor Bob, that could have key=alice_and_bobs_childs_email_address.
+ * - Several modifiers can be applied to the tag name. This is done by
+ * appending a dash character ('-') and the modifier name in the order
+ * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.
+ * - language -- a tag whose value is localized for a particular language
+ * is appended with the ISO 639-2/B 3-letter language code.
+ * For example: Author-ger=Michael, Author-eng=Mike
+ * The original/default language is in the unqualified "Author" tag.
+ * A demuxer should set a default if it sets any translated tag.
+ * - sorting -- a modified version of a tag that should be used for
+ * sorting will have '-sort' appended. E.g. artist="The Beatles",
+ * artist-sort="Beatles, The".
+ *
+ * - Demuxers attempt to export metadata in a generic format, however tags
+ * with no generic equivalents are left as they are stored in the container.
+ * A list of generic tag names follows:
+ *
+ @verbatim
+ album -- name of the set this work belongs to
+ album_artist -- main creator of the set/album, if different from artist.
+ e.g. "Various Artists" for compilation albums.
+ artist -- main creator of the work
+ comment -- any additional description of the file.
+ composer -- who composed the work, if different from artist.
+ copyright -- name of copyright holder.
+ creation_time -- date when the file was created, preferably in ISO 8601.
+ date -- date when the work was created, preferably in ISO 8601.
+ disc -- number of a subset, e.g. disc in a multi-disc collection.
+ encoder -- name/settings of the software/hardware that produced the file.
+ encoded_by -- person/group who created the file.
+ filename -- original name of the file.
+ genre -- <self-evident>.
+ language -- main language in which the work is performed, preferably
+ in ISO 639-2 format. Multiple languages can be specified by
+ separating them with commas.
+ performer -- artist who performed the work, if different from artist.
+ E.g. for "Also sprach Zarathustra", artist would be "Richard
+ Strauss" and performer "London Philharmonic Orchestra".
+ publisher -- name of the label/publisher.
+ service_name -- name of the service in broadcasting (channel name).
+ service_provider -- name of the service provider in broadcasting.
+ title -- name of the work.
+ track -- number of this work in the set, can be in form current/total.
+ variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of
+ @endverbatim
+ *
+ * Look in the examples section for an application example of how to use the Metadata API.
+ *
+ * @}
+ */
+
+/* packet functions */
+
+
+/**
+ * Allocate and read the payload of a packet and initialize its
+ * fields with default values.
+ *
+ * @param pkt packet
+ * @param size desired payload size
+ * @return >0 (read size) if OK, AVERROR_xxx otherwise
+ */
+int av_get_packet(AVIOContext *s, AVPacket *pkt, int size);
+
+
+/**
+ * Read data and append it to the current content of the AVPacket.
+ * If pkt->size is 0 this is identical to av_get_packet.
+ * Note that this uses av_grow_packet and thus involves a realloc
+ * which is inefficient. Thus this function should only be used
+ * when there is no reasonable way to know (an upper bound of)
+ * the final size.
+ *
+ * @param pkt packet
+ * @param size amount of data to read
+ * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data
+ * will not be lost even if an error occurs.
+ */
+int av_append_packet(AVIOContext *s, AVPacket *pkt, int size);
+
+/*************************************************/
+/* fractional numbers for exact pts handling */
+
+/**
+ * The exact value of the fractional number is: 'val + num / den'.
+ * num is assumed to be 0 <= num < den.
+ */
+typedef struct AVFrac {
+ int64_t val, num, den;
+} AVFrac;
+
+/*************************************************/
+/* input/output formats */
+
+struct AVCodecTag;
+
+/**
+ * This structure contains the data a format has to probe a file.
+ */
+typedef struct AVProbeData {
+ const char *filename;
+ unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */
+ int buf_size; /**< Size of buf except extra allocated bytes */
+} AVProbeData;
+
+#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4)
+#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension
+#define AVPROBE_SCORE_MAX 100 ///< maximum score
+
+#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer
+
+/// Demuxer will use avio_open, no opened file should be provided by the caller.
+#define AVFMT_NOFILE 0x0001
+#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */
+#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */
+#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for
+ raw picture data. */
+#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */
+#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */
+#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */
+#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities.
Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ +#if LIBAVFORMAT_VERSION_MAJOR <= 54 +#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks +#else +#define AVFMT_TS_NONSTRICT 0x20000 +#endif + /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in av_write_frame and + av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. 
+ */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. + * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. + */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. Only used in raw format right + * now. 'avformat_new_stream' should be called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. 
+ * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. + * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. + * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. + */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The single packet associated with it will be returned + * among the first few packets read from the file unless seeking takes place. 
+ * It can also be accessed at any time in AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. + */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. + * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; + /** + * Codec context associated with this stream. Allocated and freed by + * libavformat. + * + * - decoding: The demuxer exports codec information stored in the headers + * here. + * - encoding: The user sets codec information, the muxer writes it to the + * output. Mandatory fields as specified in AVCodecContext + * documentation must be set even if this AVCodecContext is + * not actually used for encoding. + */ + AVCodecContext *codec; + void *priv_data; + + /** + * encoding: pts generation when outputting stream + */ + struct AVFrac pts; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: set by libavformat in avformat_write_header. The muxer may use the + * user-provided value of @ref AVCodecContext.time_base "codec->time_base" + * as a hint. + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. + + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. + */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Stream information used internally by av_find_stream_info() + */ +#define MAX_STD_TIMEBASES (60*12+6) + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t reference_dts; + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ +#define MAX_PROBE_PACKETS 2500 + int probe_packets; + + /** + * Number of frames that have been demuxed during av_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. + */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Real base framerate of the stream. + * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + * + * Code outside avformat should access this field using: + * av_stream_get/set_r_frame_rate(stream) + */ + AVRational r_frame_rate; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. + * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. 
+ */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. + */ + int pts_wrap_behavior; + +} AVStream; + +AVRational av_stream_get_r_frame_rate(const AVStream *s); +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. + */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. + */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + */ +typedef struct AVFormatContext { + /** + * A class for logging and AVOptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * Can only be iformat or oformat, not both at the same time. + * + * decoding: set by avformat_open_input(). + * encoding: set by the user. + */ + struct AVInputFormat *iformat; + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. 
+ */ + void *priv_data; + + /** + * I/O context. + * + * decoding: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * encoding: set by the user. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ + + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * decoding: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * encoding: streams are created by the user before avformat_write_header(). + */ + unsigned int nb_streams; + AVStream **streams; + + char filename[1024]; /**< input or output filename */ + + /** + * Decoding: position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + */ + int64_t duration; + + /** + * Decoding: total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. + */ + int bit_rate; + + unsigned int packet_size; + int max_delay; + + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. + + /** + * decoding: size of data to probe; encoding: unused. + */ + unsigned int probesize; + + /** + * decoding: maximum time (in AV_TIME_BASE units) during which the input should + * be analyzed in avformat_find_stream_info(). 
+ */ + int max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. + * muxing : unused + * demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * muxing : set by user + * demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the unix epoch (00:00 1st January 1970). That is, pts=0 + * in the stream was captured at this real world time. + * - encoding: Set by user. + * - decoding: Unused. + */ + int64_t start_time_realtime; + + /** + * decoding: number of frames used to probe fps + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * - encoding: unused + * - decoding: Set by user. + */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * decoding: set by the user before avformat_open_input(). + * encoding: set by the user before avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
+ * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int use_wallclock_as_timestamps; + + /** + * Avoid negative timestamps during muxing. + * 0 -> allow negative timestamps + * 1 -> avoid negative timestamps + * -1 -> choose automatically (default) + * Note, this only works when interleave_packet_per_dts is in use. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int avoid_negative_ts; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. + * - encoding: unused + * - decoding: Read by user via AVOptions (NO direct access) + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + unsigned int skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user via AVOPtions (NO direct access) + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + */ + int probe_score; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * This buffer is only needed when packets were already buffered but + * not decoded, for example to get the codec parameters in MPEG + * streams. + */ + struct AVPacketList *packet_buffer; + struct AVPacketList *packet_buffer_end; + + /* av_seek_frame() support */ + int64_t data_offset; /**< offset of the first packet */ + + /** + * Raw packets from the demuxer, prior to parsing and decoding. + * This buffer is used for buffering packets until the codec can + * be identified, as parsing cannot be done without knowing the + * codec. + */ + struct AVPacketList *raw_packet_buffer; + struct AVPacketList *raw_packet_buffer_end; + /** + * Packets split by the parser get queued here. + */ + struct AVPacketList *parse_queue; + struct AVPacketList *parse_queue_end; + /** + * Remaining size available for raw_packet_buffer, in bytes. + */ +#define RAW_PACKET_BUFFER_SIZE 2500000 + int raw_packet_buffer_remaining_size; + + /** + * Offset to remap timestamps to be non-negative. + * Expressed in timebase units. 
+ * @see AVStream.mux_ts_offset + */ + int64_t offset; + + /** + * Timebase for the timestamp offset. + */ + AVRational offset_timebase; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. + * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + */ + AVCodec *subtitle_codec; +} AVFormatContext; + +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); + +/** + * Returns the method used to set ctx->duration. + * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. + * + * @see av_register_input_format() + * @see av_register_output_format() + */ +void av_register_all(void); + +void av_register_input_format(AVInputFormat *format); +void av_register_output_format(AVOutputFormat *format); + +/** + * Do global initialization of network components. This is optional, + * but recommended, since it avoids the overhead of implicitly + * doing the setup for each session. + * + * Calling this function will become mandatory if using network + * protocols at some major version bump. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. + */ +int avformat_network_deinit(void); + +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. 
+ */ +AVInputFormat *av_iformat_next(AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +AVOutputFormat *av_oformat_next(AVOutputFormat *f); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +#if FF_API_ALLOC_OUTPUT_CONTEXT +/** + * @deprecated deprecated in favor of avformat_alloc_output_context2() + */ +attribute_deprecated +AVFormatContext *avformat_alloc_output_context(const char *format, + AVOutputFormat *oformat, + const char *filename); +#endif + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. + * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + */ +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. 
+ * @param score_max A probe score larger than this is required to accept a
+ * detection, the variable is set to the actual detection
+ * score afterwards.
+ * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended
+ * to retry with a larger probe buffer.
+ */
+AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);
+
+/**
+ * Guess the file format.
+ *
+ * @param is_opened Whether the file is already opened; determines whether
+ * demuxers with or without AVFMT_NOFILE are probed.
+ * @param score_ret The score of the best detection.
+ */
+AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
+
+/**
+ * Probe a bytestream to determine the input format. Each time a probe returns
+ * with a score that is too low, the probe buffer size is increased and another
+ * attempt is made. When the maximum probe size is reached, the input format
+ * with the highest score is returned.
+ *
+ * @param pb the bytestream to probe
+ * @param fmt the input format is put here
+ * @param filename the filename of the stream
+ * @param logctx the log context
+ * @param offset the offset within the bytestream to probe from
+ * @param max_probe_size the maximum probe buffer size (zero for default)
+ * @return the score in case of success (the maximal score is AVPROBE_SCORE_MAX),
+ * a negative AVERROR code otherwise
+ */
+int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
+ const char *filename, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Like av_probe_input_buffer2() but returns 0 on success
+ */
+int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
+ const char *filename, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Open an input stream and read the header. The codecs are not opened.
+ * The stream must be closed with avformat_close_input().
+ *
+ * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
+ * May be a pointer to NULL, in which case an AVFormatContext is allocated by this
+ * function and written into ps.
+ * Note that a user-supplied AVFormatContext will be freed on failure.
+ * @param filename Name of the stream to open.
+ * @param fmt If non-NULL, this parameter forces a specific input format.
+ * Otherwise the format is autodetected.
+ * @param options A dictionary filled with AVFormatContext and demuxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing
+ * options that were not found. May be NULL.
+ *
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note If you want to use custom IO, preallocate the format context and set its pb field.
+ */
+int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
+
+attribute_deprecated
+int av_demuxer_open(AVFormatContext *ic);
+
+#if FF_API_FORMAT_PARAMETERS
+/**
+ * Read packets of a media file to get stream information. This
+ * is useful for file formats with no headers such as MPEG. This
+ * function also computes the real framerate in case of MPEG-2 repeat
+ * frame mode.
+ * The logical file position is not changed by this function;
+ * examined packets may be buffered for later processing.
+ *
+ * @param ic media file handle
+ * @return >=0 if OK, AVERROR_xxx on error
+ * @todo Let the user decide somehow what information is needed so that
+ * we do not waste time getting stuff the user does not need.
+ * + * @deprecated use avformat_find_stream_info. + */ +attribute_deprecated +int av_find_stream_info(AVFormatContext *ic); +#endif + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. + */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. + * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +#if FF_API_READ_PACKET +/** + * @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw + * unprocessed packets + * + * Read a transport packet from a media file. + * + * This function is obsolete and should never be used. + * Use av_read_frame() instead. + * + * @param s media file handle + * @param pkt is filled + * @return 0 if OK, AVERROR_xxx on error + */ +attribute_deprecated +int av_read_packet(AVFormatContext *s, AVPacket *pkt); +#endif + +/** + * Return the next frame of a stream. 
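/*
 * A minimal usage sketch of the open/probe/select sequence documented above
 * (avformat_open_input, avformat_find_stream_info, av_find_best_stream).
 * It assumes libavformat/avformat.h is included and av_register_all() was
 * called at startup; the filename handling is a placeholder and error
 * handling is reduced to early returns.
 */
static int open_best_video_stream(const char *filename, AVFormatContext **out_ctx)
{
    AVFormatContext *ic = NULL;      /* allocated by avformat_open_input() */
    AVCodec *decoder = NULL;
    int stream_index, ret;

    ret = avformat_open_input(&ic, filename, NULL, NULL);
    if (ret < 0)
        return ret;                  /* ic is freed on failure */

    ret = avformat_find_stream_info(ic, NULL);
    if (ret < 0) {
        avformat_close_input(&ic);
        return ret;
    }

    stream_index = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1,
                                       &decoder, 0);
    if (stream_index < 0) {          /* AVERROR_STREAM_NOT_FOUND, ... */
        avformat_close_input(&ic);
        return stream_index;
    }

    *out_ctx = ic;                   /* caller later calls avformat_close_input() */
    return stream_index;
}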
+ * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. + * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * av_free_packet when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. + */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). 
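/*
 * Sketch of the packet-reading loop and a seek, following the av_read_frame()
 * and av_seek_frame() documentation above.  fmt_ctx and video_stream are
 * assumed to come from an open/select step like the previous sketch; every
 * returned packet is released with av_free_packet() as the documentation
 * requires.
 */
static void read_and_seek(AVFormatContext *fmt_ctx, int video_stream)
{
    AVPacket pkt;

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream) {
            /* hand pkt to the decoder; prefer pkt.dts when pkt.pts is AV_NOPTS_VALUE */
        }
        av_free_packet(&pkt);
    }

    /* Jump back to roughly the 10 second mark of the selected stream. */
    int64_t target = av_rescale_q(10 * AV_TIME_BASE, AV_TIME_BASE_Q,
                                  fmt_ctx->streams[video_stream]->time_base);
    av_seek_frame(fmt_ctx, video_stream, target, AVSEEK_FLAG_BACKWARD);
}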
+ * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +#if FF_API_CLOSE_INPUT_FILE +/** + * @deprecated use avformat_close_input() + * Close a media file (but not its codecs). + * + * @param s media file handle + */ +attribute_deprecated +void av_close_input_file(AVFormatContext *s); +#endif + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. + */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#if FF_API_NEW_STREAM +/** + * Add a new stream to a media file. + * + * Can only be called in the read_header() function. If the flag + * AVFMTCTX_NOHEADER is in the format context, then new streams + * can be added in read_packet too. + * + * @param s media file handle + * @param id file-format-dependent stream ID + */ +attribute_deprecated +AVStream *av_new_stream(AVFormatContext *s, int id); +#endif + +#if FF_API_SET_PTS_INFO +/** + * @deprecated this function is not supposed to be called outside of lavf + */ +attribute_deprecated +void av_set_pts_info(AVStream *s, int pts_wrap_bits, + unsigned int pts_num, unsigned int pts_den); +#endif + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number + +/** + * @addtogroup lavf_encoding + * @{ + */ +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. + */ +int avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * The packet shall contain one audio or video frame. + * The packet must be correctly interleaved according to the container + * specification, if not then av_interleaved_write_frame must be used. + * + * @param s media file handle + * @param pkt The packet, which contains the stream_index, buf/buf_size, + * dts/pts, ... + * This can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, + * for muxers that buffer up data internally before writing it + * to the output. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + */ +int av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * The packet must contain one audio or video frame. + * If the packets are already correctly interleaved, the application should + * call av_write_frame() instead as it is slightly faster. It is also important + * to keep in mind that completely non-interleaved input will need huge amounts + * of memory to interleave with this, so it is preferable to interleave at the + * demuxer level. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. 
pkt->buf must be set + * to a valid AVBufferRef describing the packet data. Libavformat takes + * ownership of this reference and will unref it when it sees fit. The caller + * must not access the data through this reference after this function returns. + * This can be NULL (at any time, not just at the end), to flush the + * interleaving queues. + * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the + * index of the corresponding stream in @ref AVFormatContext.streams + * "s.streams". + * It is very strongly recommended that timing information (@ref AVPacket.pts + * "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to + * correct values. + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. + */ +enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, + const char *filename, const char *mime_type, + enum AVMediaType type); + +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. + * @param s media file handle + * @param stream stream in the media file + * @param[out] dts DTS of the last packet output for the stream, in stream + * time_base units + * @param[out] wall absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. + */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. 
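/*
 * Sketch of the muxing-side flow documented above: allocate an output
 * context, add a stream, open the IO context, then write header, packets and
 * trailer.  "out.mkv" is a placeholder; configuring the stream's codec
 * parameters before avformat_write_header() is assumed and omitted here.
 */
static int write_output_file(void)
{
    AVFormatContext *oc = NULL;
    AVStream *st;
    int ret;

    ret = avformat_alloc_output_context2(&oc, NULL, NULL, "out.mkv");
    if (ret < 0)
        return ret;

    st = avformat_new_stream(oc, NULL);
    if (!st) {
        avformat_free_context(oc);
        return AVERROR(ENOMEM);
    }
    /* ... fill st->codec (codec_id, dimensions, time base, ...) here ... */

    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, "out.mkv", AVIO_FLAG_WRITE);
        if (ret < 0) {
            avformat_free_context(oc);
            return ret;
        }
    }

    ret = avformat_write_header(oc, NULL);       /* muxer options could go here */
    if (ret >= 0) {
        /* ... av_interleaved_write_frame(oc, &pkt) for each encoded packet ... */
        av_write_trailer(oc);
    }

    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_close(oc->pb);
    avformat_free_context(oc);
    return ret;
}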
+ * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, + AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. + * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. 
+ * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +void av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. + * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. + * + * @param extensions a comma-separated list of filename extensions + */ +int av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. 
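/*
 * Small illustration of av_url_split() as documented above.  The URL and the
 * buffer sizes are arbitrary; components that are not needed can be requested
 * with a NULL buffer and a size of 0.
 */
static void split_example(void)
{
    char proto[16], host[128], path[256];
    int port = -1;

    av_url_split(proto, sizeof(proto),
                 NULL, 0,                    /* authorization not needed */
                 host, sizeof(host),
                 &port,
                 path, sizeof(path),
                 "rtsp://192.168.1.10:554/stream1");
    /* proto = "rtsp", host = "192.168.1.10", port = 554, path = "/stream1" */
}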
+ */ +const struct AVCodecTag *avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. + * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avio.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avio.h new file mode 100644 index 0000000..4f4ac3c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/avio.h @@ -0,0 +1,481 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include <stdint.h> + +#include "libavutil/common.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavformat/version.h" + + +#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ + +/** + * Callback for checking whether to abort blocking functions. + * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. + * + * If this AVIOContext is created by avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. + */ + const AVClass *av_class; + unsigned char *buffer; /**< Start of the buffer. */ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int must_flush; /**< true if the next seek should flush */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. 
+ */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; +} AVIOContext; + +/* unbuffered I/O */ + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int avio_check(const char *url, int flags); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with av_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with av_malloc() and friends. + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. + */ +AVIOContext *avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +void avio_w8(AVIOContext *s, int b); +void avio_write(AVIOContext *s, const unsigned char *buf, int size); +void avio_wl64(AVIOContext *s, uint64_t val); +void avio_wb64(AVIOContext *s, uint64_t val); +void avio_wl32(AVIOContext *s, unsigned int val); +void avio_wb32(AVIOContext *s, unsigned int val); +void avio_wl24(AVIOContext *s, unsigned int val); +void avio_wb24(AVIOContext *s, unsigned int val); +void avio_wl16(AVIOContext *s, unsigned int val); +void avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. 
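/*
 * Sketch of the custom-I/O setup described above: wrap an in-memory blob in an
 * AVIOContext via avio_alloc_context() and hand it to a preallocated
 * AVFormatContext through its pb field (see the avformat_open_input() note).
 * struct mem_reader and the blob/blob_size names are illustrative, not part of
 * the API; string.h is assumed for memcpy() and NULL checks are omitted.
 */
struct mem_reader {
    const uint8_t *data;
    size_t size, pos;
};

static int mem_read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct mem_reader *r = opaque;
    size_t left = r->size - r->pos;

    if ((size_t)buf_size > left)
        buf_size = (int)left;
    memcpy(buf, r->data + r->pos, buf_size);
    r->pos += buf_size;
    return buf_size;                          /* 0 signals end of data */
}

static int open_from_memory(const uint8_t *blob, size_t blob_size, AVFormatContext **ic)
{
    struct mem_reader *rd = av_mallocz(sizeof(*rd));
    unsigned char *io_buf = av_malloc(4096);  /* buffer must come from av_malloc() */
    AVIOContext *pb;

    rd->data = blob;
    rd->size = blob_size;

    pb = avio_alloc_context(io_buf, 4096, 0 /* read-only */, rd,
                            mem_read_packet, NULL, NULL);

    *ic = avformat_alloc_context();
    (*ic)->pb = pb;                           /* custom IO, then open as usual */
    return avformat_open_input(ic, "in-memory", NULL, NULL);
}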
+ */ +int avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @return number of bytes written. + */ +int avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Passing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Oring this flag as into the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int url_feof(AVIOContext *s); + +/** @warning currently size is limited */ +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data to the output s. + * + * Force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + */ +void avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. + * @return number of bytes read or AVERROR + */ +int avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int avio_r8 (AVIOContext *s); +unsigned int avio_rl16(AVIOContext *s); +unsigned int avio_rl24(AVIOContext *s); +unsigned int avio_rl32(AVIOContext *s); +uint64_t avio_rl64(AVIOContext *s); +unsigned int avio_rb16(AVIOContext *s); +unsigned int avio_rb24(AVIOContext *s); +unsigned int avio_rb32(AVIOContext *s); +uint64_t avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. + * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. 
+ * @return number of bytes read (is always <= maxlen) + */ +int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to avio_open must be one of the following + * constants, optionally ORed with other flags. + * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_closep + */ +int avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by avio_open(). 
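/*
 * Sketch of plain buffered reading with the URL-open API documented above.
 * "http://example.com/clip.bin" is a placeholder resource; any protocol known
 * to the library (file, http, ...) is opened the same way.
 */
static int read_some_bytes(void)
{
    AVIOContext *in = NULL;
    uint8_t buf[512];
    int ret, n;

    ret = avio_open(&in, "http://example.com/clip.bin", AVIO_FLAG_READ);
    if (ret < 0)
        return ret;

    int64_t total = avio_size(in);            /* may be an AVERROR for non-seekable input */
    unsigned int magic = avio_rb32(in);       /* big-endian 32-bit read */
    n = avio_read(in, buf, sizeof(buf));
    /* ... parse magic and buf[0..n), total bytes overall ... */

    return avio_close(in);
}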
+ * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_close + */ +int avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with av_free(). + * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). + * @param pause 1 for pause, 0 for resume + */ +int avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. + * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +#endif /* AVFORMAT_AVIO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/version.h new file mode 100644 index 0000000..0028c9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavformat/version.h @@ -0,0 +1,76 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
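/*
 * Two small illustrations of the avio helpers documented above: building a
 * blob in memory with the dynamic-buffer API, and listing the available input
 * protocols.  stdio.h is assumed for printf(); error checks are omitted.
 */
static void dyn_buf_and_protocols(void)
{
    AVIOContext *dyn = NULL;
    uint8_t *blob = NULL;
    int blob_len;
    void *it = NULL;
    const char *name;

    avio_open_dyn_buf(&dyn);
    avio_wb32(dyn, 0x4D4F4F4E);                    /* arbitrary payload */
    avio_write(dyn, (const unsigned char *)"hello", 5);
    blob_len = avio_close_dyn_buf(dyn, &blob);     /* blob gets padding appended */
    /* ... use blob[0..blob_len) ... */
    av_free(blob);

    while ((name = avio_enum_protocols(&it, 0)) != NULL)
        printf("input protocol: %s\n", name);
}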
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "libavutil/avutil.h" + +#define LIBAVFORMAT_VERSION_MAJOR 55 +#define LIBAVFORMAT_VERSION_MINOR 19 +#define LIBAVFORMAT_VERSION_MICRO 104 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_ALLOC_OUTPUT_CONTEXT +#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FORMAT_PARAMETERS +#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_NEW_STREAM +#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_SET_PTS_INFO +#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CLOSE_INPUT_FILE +#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_READ_PACKET +#define FF_API_READ_PACKET (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ASS_SSA +#define FF_API_ASS_SSA (LIBAVFORMAT_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/avresample.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/avresample.h new file mode 100644 index 0000000..f62e533 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/avresample.h @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_AVRESAMPLE_H +#define AVRESAMPLE_AVRESAMPLE_H + +/** + * @file + * @ingroup lavr + * external API header + */ + +/** + * @defgroup lavr Libavresample + * @{ + * + * Libavresample (lavr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lavr is done through AVAudioResampleContext, which is + * allocated with avresample_alloc_context(). It is opaque, so all parameters + * must be set with the @ref avoptions API. 
+ * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * AVAudioResampleContext *avr = avresample_alloc_context(); + * av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(avr, "in_sample_rate", 48000, 0); + * av_opt_set_int(avr, "out_sample_rate", 44100, 0); + * av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once the context is initialized, it must be opened with avresample_open(). If + * you need to change the conversion parameters, you must close the context with + * avresample_close(), change the parameters as described above, then reopen it + * again. + * + * The conversion itself is done by repeatedly calling avresample_convert(). + * Note that the samples may get buffered in two places in lavr. The first one + * is the output FIFO, where the samples end up if the output buffer is not + * large enough. The data stored in there may be retrieved at any time with + * avresample_read(). The second place is the resampling delay buffer, + * applicable only when resampling is done. The samples in it require more input + * before they can be processed. Their current amount is returned by + * avresample_get_delay(). At the end of conversion the resampling buffer can be + * flushed by calling avresample_convert() with NULL input. + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_linesize, in_samples; + * + * while (get_input(&input, &in_linesize, &in_samples)) { + * uint8_t *output + * int out_linesize; + * int out_samples = avresample_available(avr) + + * av_rescale_rnd(avresample_get_delay(avr) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, &out_linesize, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = avresample_convert(avr, &output, out_linesize, out_samples, + * input, in_linesize, in_samples); + * handle_output(output, out_linesize, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished and the FIFOs are flushed if required, the + * conversion context and everything associated with it must be freed with + * avresample_free(). + */ + +#include "libavutil/avutil.h" +#include "libavutil/channel_layout.h" +#include "libavutil/dict.h" +#include "libavutil/log.h" + +#include "libavresample/version.h" + +#define AVRESAMPLE_MAX_CHANNELS 32 + +typedef struct AVAudioResampleContext AVAudioResampleContext; + +/** Mixing Coefficient Types */ +enum AVMixCoeffType { + AV_MIX_COEFF_TYPE_Q8, /** 16-bit 8.8 fixed-point */ + AV_MIX_COEFF_TYPE_Q15, /** 32-bit 17.15 fixed-point */ + AV_MIX_COEFF_TYPE_FLT, /** floating-point */ + AV_MIX_COEFF_TYPE_NB, /** Number of coeff types. 
Not part of ABI */ +}; + +/** Resampling Filter Types */ +enum AVResampleFilterType { + AV_RESAMPLE_FILTER_TYPE_CUBIC, /**< Cubic */ + AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + AV_RESAMPLE_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +enum AVResampleDitherMethod { + AV_RESAMPLE_DITHER_NONE, /**< Do not use dithering */ + AV_RESAMPLE_DITHER_RECTANGULAR, /**< Rectangular Dither */ + AV_RESAMPLE_DITHER_TRIANGULAR, /**< Triangular Dither*/ + AV_RESAMPLE_DITHER_TRIANGULAR_HP, /**< Triangular Dither with High Pass */ + AV_RESAMPLE_DITHER_TRIANGULAR_NS, /**< Triangular Dither with Noise Shaping */ + AV_RESAMPLE_DITHER_NB, /**< Number of dither types. Not part of ABI. */ +}; + +/** + * Return the LIBAVRESAMPLE_VERSION_INT constant. + */ +unsigned avresample_version(void); + +/** + * Return the libavresample build-time configuration. + * @return configure string + */ +const char *avresample_configuration(void); + +/** + * Return the libavresample license. + */ +const char *avresample_license(void); + +/** + * Get the AVClass for AVAudioResampleContext. + * + * Can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options + * without allocating a context. + * + * @see av_opt_find(). + * + * @return AVClass for AVAudioResampleContext + */ +const AVClass *avresample_get_class(void); + +/** + * Allocate AVAudioResampleContext and set options. + * + * @return allocated audio resample context, or NULL on failure + */ +AVAudioResampleContext *avresample_alloc_context(void); + +/** + * Initialize AVAudioResampleContext. + * + * @param avr audio resample context + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_open(AVAudioResampleContext *avr); + +/** + * Close AVAudioResampleContext. + * + * This closes the context, but it does not change the parameters. The context + * can be reopened with avresample_open(). It does, however, clear the output + * FIFO and any remaining leftover samples in the resampling delay buffer. If + * there was a custom matrix being used, that is also cleared. + * + * @see avresample_convert() + * @see avresample_set_matrix() + * + * @param avr audio resample context + */ +void avresample_close(AVAudioResampleContext *avr); + +/** + * Free AVAudioResampleContext and associated AVOption values. + * + * This also calls avresample_close() before freeing. + * + * @param avr audio resample context + */ +void avresample_free(AVAudioResampleContext **avr); + +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libavresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param normalize if 1, coefficients will be normalized to prevent + * overflow. if 0, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. 
dplii) + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, int normalize, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding); + +/** + * Get the current channel mixing matrix. + * + * If no custom matrix has been previously set or the AVAudioResampleContext is + * not open, an error is returned. + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_get_matrix(AVAudioResampleContext *avr, double *matrix, + int stride); + +/** + * Set channel mixing matrix. + * + * Allows for setting a custom mixing matrix, overriding the default matrix + * generated internally during avresample_open(). This function can be called + * anytime on an allocated context, either before or after calling + * avresample_open(), as long as the channel layouts have been set. + * avresample_convert() always uses the current matrix. + * Calling avresample_close() on the context will clear the current matrix. + * + * @see avresample_close() + * + * @param avr audio resample context + * @param matrix mixing coefficients; matrix[i + stride * o] is the weight of + * input channel i in output channel o. + * @param stride distance between adjacent input channels in the matrix array + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_matrix(AVAudioResampleContext *avr, const double *matrix, + int stride); + +/** + * Set a customized input channel mapping. + * + * This function can only be called when the allocated context is not open. + * Also, the input channel layout must have already been set. + * + * Calling avresample_close() on the context will clear the channel mapping. + * + * The map for each input channel specifies the channel index in the source to + * use for that particular channel, or -1 to mute the channel. Source channels + * can be duplicated by using the same index for multiple input channels. + * + * Examples: + * + * Reordering 5.1 AAC order (C,L,R,Ls,Rs,LFE) to FFmpeg order (L,R,C,LFE,Ls,Rs): + * { 1, 2, 0, 5, 3, 4 } + * + * Muting the 3rd channel in 4-channel input: + * { 0, 1, -1, 3 } + * + * Duplicating the left channel of stereo input: + * { 0, 0 } + * + * @param avr audio resample context + * @param channel_map customized input channel mapping + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_channel_mapping(AVAudioResampleContext *avr, + const int *channel_map); + +/** + * Set compensation for resampling. + * + * This can be called anytime after avresample_open(). If resampling is not + * automatically enabled because of a sample rate conversion, the + * "force_resampling" option must have been set to 1 when opening the context + * in order to use resampling compensation. + * + * @param avr audio resample context + * @param sample_delta compensation delta, in samples + * @param compensation_distance compensation distance, in samples + * @return 0 on success, negative AVERROR code on failure + */ +int avresample_set_compensation(AVAudioResampleContext *avr, int sample_delta, + int compensation_distance); + +/** + * Convert input samples and write them to the output FIFO. 
+ * + * The upper bound on the number of output samples is given by + * avresample_available() + (avresample_get_delay() + number of input samples) * + * output sample rate / input sample rate. + * + * The output data can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to avresample_read(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. avresample_get_delay() tells the number of remaining + * samples. To get this data as output, call avresample_convert() with NULL + * input. + * + * At the end of the conversion process, there may be data remaining in the + * internal FIFO buffer. avresample_available() tells the number of remaining + * samples. To get this data as output, either call avresample_convert() with + * NULL input or call avresample_read(). + * + * @see avresample_available() + * @see avresample_read() + * @see avresample_get_delay() + * + * @param avr audio resample context + * @param output output data pointers + * @param out_plane_size output plane size, in bytes. + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * output, which could slow down some conversions. + * @param out_samples maximum number of samples that the output buffer can hold + * @param input input data pointers + * @param in_plane_size input plane size, in bytes + * This can be 0 if unknown, but that will lead to + * optimized functions not being used directly on the + * input, which could slow down some conversions. + * @param in_samples number of input samples to convert + * @return number of samples written to the output buffer, + * not including converted samples added to the internal + * output FIFO + */ +int avresample_convert(AVAudioResampleContext *avr, uint8_t **output, + int out_plane_size, int out_samples, uint8_t **input, + int in_plane_size, int in_samples); + +/** + * Return the number of samples currently in the resampling delay buffer. + * + * When resampling, there may be a delay between the input and output. Any + * unconverted samples in each call are stored internally in a delay buffer. + * This function allows the user to determine the current number of samples in + * the delay buffer, which can be useful for synchronization. + * + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples currently in the resampling delay buffer + */ +int avresample_get_delay(AVAudioResampleContext *avr); + +/** + * Return the number of available samples in the output FIFO. + * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. The samples in the FIFO can be read with avresample_read() or + * avresample_convert(). + * + * @see avresample_read() + * @see avresample_convert() + * + * @param avr audio resample context + * @return number of samples available for reading + */ +int avresample_available(AVAudioResampleContext *avr); + +/** + * Read samples from the output FIFO. 
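/*
 * Sketch of the end-of-stream flush described above: push the samples still
 * sitting in the resampling delay buffer out with a NULL input, then drain
 * the output FIFO.  output, out_linesize, max_out_samples and handle_output()
 * are assumed to be the caller's buffer and callback, as in the conversion
 * loop shown earlier in this header.
 */
while (avresample_get_delay(avr) > 0) {
    int got = avresample_convert(avr, &output, out_linesize, max_out_samples,
                                 NULL, 0, 0);          /* NULL input flushes */
    if (got <= 0)
        break;
    handle_output(output, out_linesize, got);
}

while (avresample_available(avr) > 0) {
    int got = avresample_read(avr, &output, max_out_samples);
    if (got <= 0)
        break;
    handle_output(output, out_linesize, got);
}

avresample_free(&avr);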
+ * + * During conversion, if the user does not specify an output buffer or + * specifies an output buffer that is smaller than what is needed, remaining + * samples that are not written to the output are stored to an internal FIFO + * buffer. This function can be used to read samples from that internal FIFO. + * + * @see avresample_available() + * @see avresample_convert() + * + * @param avr audio resample context + * @param output output data pointers. May be NULL, in which case + * nb_samples of data is discarded from output FIFO. + * @param nb_samples number of samples to read from the FIFO + * @return the number of samples written to output + */ +int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples); + +/** + * @} + */ + +#endif /* AVRESAMPLE_AVRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/version.h new file mode 100644 index 0000000..bd52ca6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavresample/version.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVRESAMPLE_VERSION_H +#define AVRESAMPLE_VERSION_H + +/** + * @file + * @ingroup lavr + * Libavresample version macros. + */ + +#define LIBAVRESAMPLE_VERSION_MAJOR 1 +#define LIBAVRESAMPLE_VERSION_MINOR 1 +#define LIBAVRESAMPLE_VERSION_MICRO 0 + +#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_VERSION AV_VERSION(LIBAVRESAMPLE_VERSION_MAJOR, \ + LIBAVRESAMPLE_VERSION_MINOR, \ + LIBAVRESAMPLE_VERSION_MICRO) +#define LIBAVRESAMPLE_BUILD LIBAVRESAMPLE_VERSION_INT + +#define LIBAVRESAMPLE_IDENT "Lavr" AV_STRINGIFY(LIBAVRESAMPLE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_RESAMPLE_CLOSE_OPEN +#define FF_API_RESAMPLE_CLOSE_OPEN (LIBAVRESAMPLE_VERSION_MAJOR < 2) +#endif + +#endif /* AVRESAMPLE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/adler32.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/adler32.h new file mode 100644 index 0000000..8c08d2b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/adler32.h @@ -0,0 +1,52 @@ +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include +#include "attributes.h" + +/** + * @defgroup lavu_adler32 Adler32 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. + * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/aes.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/aes.h new file mode 100644 index 0000000..09efbda --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/aes.h @@ -0,0 +1,65 @@ +/* + * copyright (c) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *av_aes_alloc(void); + +/** + * Initialize an AVAES context. + * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
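A minimal sketch (not from this header) of the AES calls above, assuming the caller supplies a 16-byte key and IV; av_free() comes from libavutil/mem.h:

    #include <stdint.h>
    #include "libavutil/aes.h"
    #include "libavutil/mem.h"

    /* AES-128-CBC encrypt `blocks` 16-byte blocks from src into dst.
     * The IV is updated in place as the blocks are chained. */
    static int encrypt_cbc128(uint8_t *dst, const uint8_t *src, int blocks,
                              const uint8_t key[16], uint8_t iv[16])
    {
        struct AVAES *aes = av_aes_alloc();

        if (!aes)
            return -1;
        if (av_aes_init(aes, key, 128, 0) < 0) {   /* 0 = set up for encryption */
            av_free(aes);
            return -1;
        }
        av_aes_crypt(aes, dst, src, blocks, iv, 0); /* non-NULL iv selects CBC */
        av_free(aes);
        return 0;
    }
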
+ * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/attributes.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/attributes.h new file mode 100644 index 0000000..8c0e5b2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/attributes.h @@ -0,0 +1,160 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. 
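To make the intent of these attribute macros concrete, here is a small illustrative set of hypothetical declarations showing where they are normally attached; my_log() and my_old_api() do not exist anywhere, and AV_NOWARN_DEPRECATED is the macro defined just below:

    #include "libavutil/attributes.h"

    /* Hypothetical printf-like logger: format/argument checking is enabled on
     * compilers that support the attribute, and a no-op elsewhere. */
    void my_log(int level, const char *fmt, ...) av_printf_format(2, 3);

    /* Hypothetical entry point kept only for backward compatibility. */
    attribute_deprecated int my_old_api(void);

    /* Call it at this one site without tripping deprecation warnings. */
    static inline int call_old_api(void)
    {
        int ret;
        AV_NOWARN_DEPRECATED(ret = my_old_api();)
        return ret;
    }
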
+ */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + + +#if defined(__GNUC__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. + */ +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#ifdef __GNUC__ +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audio_fifo.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audio_fifo.h new file mode 100644 index 0000000..903b8f1 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audio_fifo.h @@ -0,0 +1,149 @@ +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. 
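As a usage sketch (not part of this header), the FIFO is typically used to re-chunk audio: write whatever arrives, then read out fixed-size frames; the stereo S16 format and frame size below are illustrative.

    #include "libavutil/audio_fifo.h"
    #include "libavutil/samplefmt.h"

    /* Re-chunk interleaved stereo S16 audio into frames of frame_size samples
     * (a common pattern in front of an encoder). Leftover samples stay queued. */
    static int requeue(void **out_planes, int frame_size,
                       void **in_planes,  int in_samples)
    {
        AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, frame_size);
        int ret = 0;

        if (!fifo)
            return -1;

        /* The FIFO reallocates itself if the write does not fit. */
        if (av_audio_fifo_write(fifo, in_planes, in_samples) < in_samples)
            ret = -1;

        while (ret == 0 && av_audio_fifo_size(fifo) >= frame_size) {
            if (av_audio_fifo_read(fifo, out_planes, frame_size) < frame_size)
                ret = -1;
            /* a real caller would hand the frame in out_planes off here */
        }

        av_audio_fifo_free(fifo);
        return ret;
    }
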
+ * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. + * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audioconvert.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audioconvert.h new file mode 100644 index 0000000..300a67c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/audioconvert.h @@ -0,0 +1,6 @@ + +#include "version.h" + +#if FF_API_AUDIOCONVERT +#include "channel_layout.h" +#endif diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avassert.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avassert.h new file mode 100644 index 0000000..41f5e0e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avassert.h @@ -0,0 +1,66 @@ +/* + * copyright (c) 2010 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. + */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speedloss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#else +#define av_assert2(cond) ((void)0) +#endif + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avconfig.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avconfig.h new file mode 100644 index 0000000..bac323a --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avconfig.h @@ -0,0 +1,8 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 0 +#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 +#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avstring.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avstring.h new file mode 100644 index 0000000..438ef79 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avstring.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. + * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. + */ +size_t av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. 
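A small sketch (illustrative, not from this header) of the strlcpy/strlcat pair above; the return value of the final av_strlcat() doubles as a truncation check:

    #include <stddef.h>
    #include "libavutil/avstring.h"

    /* Join "<dir>/<name>" into a fixed-size buffer without overflow.
     * Returns 0 on success, -1 if the result was truncated. */
    static int join_path(char *dst, size_t dst_size,
                         const char *dir, const char *name)
    {
        av_strlcpy(dst, dir, dst_size);
        av_strlcat(dst, "/", dst_size);
        return av_strlcat(dst, name, dst_size) < dst_size ? 0 : -1;
    }
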
+ */ +size_t av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. + * @return the allocated string + * @note You have to free the string yourself with av_free(). + */ +char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to a av_malloced string. + */ +char *av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. + * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for av_strtok() to continue scanning the same + * string. saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +int av_isdigit(int c); + +/** + * Locale-independent conversion of ASCII isgraph. + */ +int av_isgraph(int c); + +/** + * Locale-independent conversion of ASCII isspace. + */ +int av_isspace(int c); + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. 
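For av_strtok() above, a short sketch (not from this header); the option-string format is made up for illustration, and av_strdup()/av_free() come from libavutil/mem.h:

    #include <stdio.h>
    #include "libavutil/avstring.h"
    #include "libavutil/mem.h"

    /* Split a comma-separated option string such as "width=1280,height=720".
     * av_strtok() writes NUL bytes into the string, so work on a private copy. */
    static void dump_options(const char *options)
    {
        char *copy = av_strdup(options);
        char *saveptr = NULL;
        char *token;

        if (!copy)
            return;
        for (token = av_strtok(copy, ",", &saveptr); token;
             token = av_strtok(NULL, ",", &saveptr))
            printf("option: %s\n", token);
        av_free(copy);
    }
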
+ */ +int av_isxdigit(int c); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strncasecmp(const char *a, const char *b, size_t n); + + +/** + * Thread safe basename. + * @param path the path, on DOS both \ and / are considered separators. + * @return pointer to the basename substring. + */ +const char *av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *av_dirname(char *path); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE 0x01 + +/** + * Escape only specified special characters. + * Without this flag, escape also any characters that may be considered + * special by av_get_token(), such as the single quote. + */ +#define AV_ESCAPE_FLAG_STRICT 0x02 + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see av_bprint_escape() + */ +int av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avutil.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avutil.h new file mode 100644 index 0000000..4692c00 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/avutil.h @@ -0,0 +1,320 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * external API header + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref lsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version + * number is incremented with backward incompatible changes - e.g. removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu Common utility functions + * + * @brief + * libavutil contains the code shared across all the other FFmpeg + * libraries + * + * @note In order to use the functions provided by avutil you must include + * the specific header. 
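A sketch (not from this header) of the compatibility rule above in code: compare the version the program was built against with the one it is running against. The LIBAVUTIL_VERSION_* macros come from libavutil/version.h, which this avutil.h should pull in.

    #include <stdio.h>
    #include "libavutil/avutil.h"

    /* Compile-time vs. run-time check: the same major version means the ABI is
     * compatible, and a run-time version >= the compile-time one means no newer
     * API the program was built against is missing. */
    static int check_libavutil(void)
    {
        unsigned rt = avutil_version();

        printf("libavutil %u.%u.%u at run time (%s)\n",
               rt >> 16, (rt >> 8) & 0xff, rt & 0xff, avutil_configuration());
        return (rt >> 16) == LIBAVUTIL_VERSION_MAJOR && rt >= LIBAVUTIL_VERSION_INT;
    }
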
+ * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Maths + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup lavu_internal Internal + * + * Not exported functions, for internal usage only + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. + */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. + */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1< + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. + */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/blowfish.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/blowfish.h new file mode 100644 index 0000000..0b00453 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/blowfish.h @@ -0,0 +1,77 @@ +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Initialize an AVBlowfish context. + * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bprint.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bprint.h new file mode 100644 index 0000000..839ec1e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bprint.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ +#define FF_PAD_STRUCTURE(size, ...) \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct { __VA_ARGS__ })]; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. + */ +typedef struct AVBPrint { + FF_PAD_STRUCTURE(1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; + ) +} AVBPrint; + +/** + * Convenience macros for special values for av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. 
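A usage sketch (not from this header) showing the usual AVBPrint life cycle: init, append, check for truncation, then finalize into a heap string that the caller later releases with av_free():

    #include "libavutil/bprint.h"

    /* Format a description of unknown length without manual realloc logic. */
    static char *describe(int width, int height, double fps)
    {
        AVBPrint bp;
        char *str = NULL;

        /* Start in the struct's internal buffer, grow on the heap as needed. */
        av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
        av_bprintf(&bp, "%dx%d", width, height);
        av_bprintf(&bp, " @ %.3f fps", fps);

        if (!av_bprint_is_complete(&bp)) {   /* an allocation failed somewhere */
            av_bprint_finalize(&bp, NULL);   /* discard the truncated buffer */
            return NULL;
        }
        av_bprint_finalize(&bp, &str);       /* str is NULL only on OOM */
        return str;
    }
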
+ * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. + * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. 
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bswap.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bswap.h new file mode 100644 index 0000000..06f6548 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_BFIN +# include "bfin/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/buffer.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/buffer.h new file mode 100644 index 0000000..b4399fd --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/buffer.h @@ -0,0 +1,274 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- av_buffer_alloc() to just allocate a new buffer, and + * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with av_buffer_ref(). + * Use av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The av_buffer_is_writable() function is + * provided to check whether this is true and av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. 
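A sketch (not from this header) of the writability convention just described: a second reference makes the buffer read-only until av_buffer_make_writable() gives one side its own copy.

    #include <string.h>
    #include "libavutil/buffer.h"

    /* Copy-on-write in practice: after taking a second reference, neither side
     * may write until one of them is made writable (which copies the data). */
    static int cow_demo(void)
    {
        AVBufferRef *a = av_buffer_alloc(16);
        AVBufferRef *b = NULL;
        int ret = -1;

        if (!a)
            return -1;
        memset(a->data, 0, a->size);

        b = av_buffer_ref(a);                 /* two refs: not writable now */
        if (!b || av_buffer_is_writable(a))
            goto end;

        if (av_buffer_make_writable(&a) < 0)  /* a gets a private copy */
            goto end;
        a->data[0] = 42;                      /* b->data[0] is still 0 */
        ret = 0;

    end:
        av_buffer_unref(&b);                  /* safe even if b is NULL */
        av_buffer_unref(&a);
        return ret;
    }
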
+ * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. + * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *av_buffer_alloc(int size); + +/** + * Same as av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. + * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls av_free() on the buffer data. + * This function is meant to be passed to av_buffer_create(), not called + * directly. + */ +void av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until av_buffer_ref() is called on buf. + */ +int av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by av_buffer_create. 
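And a companion sketch (also illustrative, not from this header) for av_buffer_create(): wrapping memory that was not allocated by av_malloc(), with a matching free callback and an opaque tag retrievable via av_buffer_get_opaque().

    #include <stdint.h>
    #include <stdlib.h>
    #include "libavutil/buffer.h"

    /* Free callback matching the malloc() below; runs when the last ref dies. */
    static void plain_free(void *opaque, uint8_t *data)
    {
        (void)opaque;   /* the tag is available here if the callback needs it */
        free(data);     /* not av_free(): this memory came from malloc() */
    }

    static AVBufferRef *wrap_malloced(int size, void *user_tag)
    {
        uint8_t *data = malloc(size);
        AVBufferRef *buf;

        if (!data)
            return NULL;
        buf = av_buffer_create(data, size, plain_free, user_tag, 0);
        if (!buf)
            free(data); /* on failure the data is left untouched, so free it */
        return buf;
    }
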
+ */ +void *av_buffer_get_opaque(const AVBufferRef *buf); + +int av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. On failure, buf is left untouched. + * @return 0 on success, a negative AVERROR on failure. + */ +int av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with av_realloc() only if it was + * initially allocated through av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to + * get a reference to a new buffer, similar to av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent av_buffer_pool_get() calls. + * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. + * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with av_buffer_pool_init() and freed with + * av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. 
Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. It will be set to NULL. + * @see av_buffer_pool_can_uninit() + */ +void av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/channel_layout.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/channel_layout.h new file mode 100644 index 0000000..ba4f96d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/channel_layout.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2006 Michael Niedermayer + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CHANNEL_LAYOUT_H +#define AVUTIL_CHANNEL_LAYOUT_H + +#include + +/** + * @file + * audio channel layout utility functions + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup channel_masks Audio channel masks + * + * A channel layout is a 64-bits integer with a bit set for every channel. + * The number of bits set must be equal to the number of channels. + * The value 0 means that the channel layout is not known. + * @note this data structure is not powerful enough to handle channels + * combinations that have the same channel multiple times, such as + * dual-mono. + * + * @{ + */ +#define AV_CH_FRONT_LEFT 0x00000001 +#define AV_CH_FRONT_RIGHT 0x00000002 +#define AV_CH_FRONT_CENTER 0x00000004 +#define AV_CH_LOW_FREQUENCY 0x00000008 +#define AV_CH_BACK_LEFT 0x00000010 +#define AV_CH_BACK_RIGHT 0x00000020 +#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 +#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 +#define AV_CH_BACK_CENTER 0x00000100 +#define AV_CH_SIDE_LEFT 0x00000200 +#define AV_CH_SIDE_RIGHT 0x00000400 +#define AV_CH_TOP_CENTER 0x00000800 +#define AV_CH_TOP_FRONT_LEFT 0x00001000 +#define AV_CH_TOP_FRONT_CENTER 0x00002000 +#define AV_CH_TOP_FRONT_RIGHT 0x00004000 +#define AV_CH_TOP_BACK_LEFT 0x00008000 +#define AV_CH_TOP_BACK_CENTER 0x00010000 +#define AV_CH_TOP_BACK_RIGHT 0x00020000 +#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. +#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. 
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. */ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel convenience macros + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_NB +}; + +/** + * @} + */ + +/** + * Return a channel layout id that matches name, or 0 if no match is found. 
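Editor's note (illustrative sketch, not part of the vendored header): since a layout is just a bitmask built from the AV_CH_* masks above, membership tests and channel counts are plain bit arithmetic. has_lfe() and count_channels() are hypothetical helpers.

#include <stdint.h>
#include "libavutil/channel_layout.h"

static int has_lfe(uint64_t layout)
{
    return (layout & AV_CH_LOW_FREQUENCY) != 0;  /* 1 for AV_CH_LAYOUT_5POINT1 */
}

static int count_channels(uint64_t layout)
{
    int n = 0;
    for (; layout; layout >>= 1)   /* one bit per channel, so popcount == channel count */
        n += (int)(layout & 1);
    return n;                      /* 6 for AV_CH_LAYOUT_5POINT1, 2 for AV_CH_LAYOUT_STEREO */
}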
+ * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, optionally followed by 'c', yielding + * the default channel layout for that number of channels (@see + * av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * @warning Starting from the next major bump the trailing character + * 'c' to specify a number of channels will be required, while a + * channel layout mask could also be specified as a decimal number + * (if and only if not followed by "c"). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t av_get_channel_layout(const char *name); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. + * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. + * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/common.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/common.h new file mode 100644 index 0000000..b1203ad --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/common.h @@ -0,0 +1,464 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * common internal and external API header + */ + +#ifndef AVUTIL_COMMON_H +#define AVUTIL_COMMON_H + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <math.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include "attributes.h" +#include "version.h" +#include "libavutil/avconfig.h" + +#if AV_HAVE_BIGENDIAN +# define AV_NE(be, le) (be) +#else +# define AV_NE(be, le) (le) +#endif + +//rounded division & shift +#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) +/* assume b>0 */ +#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) +/* assume a>0 and b>0 */ +#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ + : ((a) + (1<<(b)) - 1) >> (b)) +#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) +#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) +#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) +#define FFSIGN(a) ((a) > 0 ? 1 : -1) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +/* misc math functions */ + +/** + * Reverse the order of the bits of an 8-bits unsigned integer. + */ +#if FF_API_AV_REVERSE +extern attribute_deprecated const uint8_t av_reverse[256]; +#endif + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef av_log2 +av_const int av_log2(unsigned v); +#endif + +#ifndef av_log2_16bit +av_const int av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer to an unsigned power of two range. + * @param a value to clip + * @param p bit position to clip at + * @return clipped value + */ +static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) +{ + if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1); + else return a; +} + +/** + * Add two signed 32-bit values with saturation. + * + * @param a one value + * @param b another value + * @return sum with signed saturation + */ +static av_always_inline int av_sat_add32_c(int a, int b) +{ + return av_clipl_int32((int64_t)a + b); +} + +/** + * Add a doubled value to another value with saturation at both stages. + * + * @param a first value + * @param b value doubled and added to a + * @return result of addition with signed saturation + */ +static av_always_inline int av_sat_dadd32_c(int a, int b) +{ + return av_sat_add32(a, av_sat_add32(b, b)); +} + +/** + * Clip a float value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const float av_clipf_c(float a, float amin, float amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a double value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. + */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= GET_BYTE;\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= GET_BYTE - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
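Editor's note (illustrative sketch, not part of the vendored header): GET_UTF8 used with *p++ as the GET_BYTE expression, as its documentation suggests; count_codepoints() is a hypothetical helper that assumes a NUL-terminated input.

#include <stdint.h>
#include "libavutil/common.h"

static int count_codepoints(const uint8_t *p)
{
    int count = 0;
    while (*p) {
        uint32_t cp;
        GET_UTF8(cp, *p++, goto fail;)   /* ERROR must exit the macro, e.g. via goto */
        (void)cp;                        /* only counting, not using the code point */
        count++;
    }
    return count;
fail:
    return -1;                           /* invalid UTF-8 sequence */
}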
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. + */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/cpu.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/cpu.h new file mode 100644 index 0000000..55c3ec9 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/cpu.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +// #if LIBAVUTIL_VERSION_MAJOR <52 +#define AV_CPU_FLAG_CMOV 0x1001000 ///< supports cmov instruction +// #else +// #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +// #endif +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) + +/** + * Return the flags which specify extensions supported by the CPU. + * The returned value is affected by av_force_cpu_flags() if that was used + * before. So av_get_cpu_flags() can easily be used in a application to + * detect the enabled cpu flags. + */ +int av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + * + * @warning this function is not thread safe. + */ +attribute_deprecated void av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. + * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. 
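Editor's note (illustrative sketch, not part of the vendored header): a typical runtime dispatch on av_get_cpu_flags(); the helper name and the particular flags tested are only an example.

#include "libavutil/cpu.h"

static const char *describe_simd(void)
{
    int flags = av_get_cpu_flags();
    if (flags & AV_CPU_FLAG_NEON)  return "NEON";
    if (flags & AV_CPU_FLAG_VFPV3) return "VFPv3";
    if (flags & AV_CPU_FLAG_SSE2)  return "SSE2";
    return "plain C";
}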
+ * + * @return negative on error. + */ +int av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int av_cpu_count(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/crc.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/crc.h new file mode 100644 index 0000000..f4219ca --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/crc.h @@ -0,0 +1,85 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include +#include +#include "attributes.h" + +/** + * @defgroup lavu_crc32 CRC32 + * @ingroup lavu_crypto + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_24_IEEE = 12, + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. + * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see av_crc_init() "le" parameter + */ +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/dict.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/dict.h new file mode 100644 index 0000000..38f03a4 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/dict.h @@ -0,0 +1,152 @@ +/* + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. + * + * @code + * AVDictionary *d = NULL; // "create" an empty dictionary + * av_dict_set(&d, "foo", "bar", 0); // add an entry + * + * char *k = av_strdup("key"); // if your strings are already allocated, + * char *v = av_strdup("value"); // you can avoid copying them like this + * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + * + * AVDictionaryEntry *t = NULL; + * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + * <....> // iterate over all entries in d + * } + * + * av_dict_free(&d); + * @endcode + * + */ + +#define AV_DICT_MATCH_CASE 1 +#define AV_DICT_IGNORE_SUFFIX 2 +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() and children. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() and chilren. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param flags Allows case as well as suffix-insensitive comparisons. + * @return Found entry or NULL, changing key or value leads to undefined behavior. + */ +AVDictionaryEntry * +av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. 
+ * + * @param m dictionary + * @return number of entries in dictionary + */ +int av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will be av_strduped depending on flags) + * @param value entry value to add to *pm (will be av_strduped depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Parse the key/value pairs list and add to a dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. + * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + */ +void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void av_dict_free(AVDictionary **m); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/error.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/error.h new file mode 100644 index 0000000..f3fd7bb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/error.h @@ -0,0 +1,117 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include +#include + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. 
*/ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found +#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. + +#define AV_ERROR_MAX_STRING_SIZE 64 + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. + * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. + * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
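Editor's note (illustrative sketch, not part of the vendored header): turning an AVERROR code back into text with av_strerror(); report() is a hypothetical helper.

#include <errno.h>
#include <stdio.h>
#include "libavutil/error.h"

static int report(int err)
{
    if (err < 0) {
        char buf[AV_ERROR_MAX_STRING_SIZE];
        if (av_strerror(err, buf, sizeof(buf)) < 0)
            snprintf(buf, sizeof(buf), "error %d", err);  /* generic fallback text */
        fprintf(stderr, "operation failed: %s\n", buf);
    }
    return err;   /* e.g. report(AVERROR(ENOMEM)) or report(AVERROR_EOF) */
}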
+ */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/eval.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/eval.h new file mode 100644 index 0000000..6159b0f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/eval.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2002 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. + * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with av_expr_free() by the user + * when it is not needed anymore. 
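Editor's note (illustrative sketch, not part of the vendored header): one-shot use of av_expr_parse_and_eval() with a single named constant; the expression, names, and helper are arbitrary examples.

#include "libavutil/eval.h"

static double scaled_width(double in_w, void *log_ctx)
{
    static const char *const names[] = { "in_w", NULL };
    double values[] = { in_w };
    double res = 0;
    int ret = av_expr_parse_and_eval(&res, "in_w*3/4+16",
                                     names, values,
                                     NULL, NULL,   /* no one-argument functions */
                                     NULL, NULL,   /* no two-argument functions */
                                     NULL, 0, log_ctx);
    return ret >= 0 ? res : 0;
}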
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with av_expr_parse(). + */ +void av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value for + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/fifo.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/fifo.h new file mode 100644 index 0000000..849b9a6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/fifo.h @@ -0,0 +1,144 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc(unsigned int size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void av_fifo_free(AVFifoBuffer *f); + +/** + * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int av_fifo_size(AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. + * @param f AVFifoBuffer to write into + * @return size + */ +int av_fifo_space(AVFifoBuffer *f); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. + * @return the number of bytes written to the FIFO + */ +int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. 
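Editor's note (illustrative sketch, not part of the vendored header): a byte-array round trip; passing NULL callbacks makes av_fifo_generic_write()/av_fifo_generic_read() treat src and dest as plain buffers, as documented above.

#include <stdint.h>
#include <string.h>
#include "libavutil/fifo.h"

static int fifo_roundtrip(void)
{
    uint8_t in[16], out[16];
    memset(in, 0xAB, sizeof(in));

    AVFifoBuffer *f = av_fifo_alloc(64);
    if (!f)
        return -1;
    av_fifo_generic_write(f, in, sizeof(in), NULL);    /* src treated as a byte array */
    av_fifo_generic_read(f, out, sizeof(out), NULL);   /* copied straight into out */
    av_fifo_free(f);
    return memcmp(in, out, sizeof(out)) == 0 ? 0 : -1;
}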
+ * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with av_fifo_size(). + */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/file.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/file.h new file mode 100644 index 0000000..a7364fe --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/file.h @@ -0,0 +1,66 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * The returned buffer must be released with av_file_unmap(). + * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by av_file_map() + */ +void av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. + * @return file descriptor of opened file (or -1 on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. 
+ */ +int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/frame.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/frame.h new file mode 100644 index 0000000..1c785dd --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/frame.h @@ -0,0 +1,659 @@ +/* + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include + +#include "libavcodec/version.h" + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" + +enum AVColorSpace{ + AVCOL_SPC_RGB = 0, + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_FCC = 4, + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above + AVCOL_SPC_SMPTE240M = 7, + AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_NB , ///< Not part of ABI +}; +#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG + +enum AVColorRange{ + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB , ///< Not part of ABI +}; + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, +}; + +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. 
In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * Similarly fields that are marked as to be only accessed by + * av_opt_ptr() can be reordered. This allows 2 forks to add fields + * without breaking compatibility with each other. + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiplies of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. + */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + uint8_t *base[AV_NUM_DATA_POINTERS]; +#endif + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + */ + int64_t pkt_pts; + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. 
+ */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int reference; + + /** + * QP table + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + /** + * mbskip_table[mb]>=1 if MB didn't change + * stride= mb_width = (width+15)>>4 + */ + attribute_deprecated + uint8_t *mbskip_table; + + /** + * motion vector table + * @code + * example: + * int mv_sample_log2= 4 - motion_subsample_log2; + * int mb_width= (width+15)>>4; + * int mv_stride= (mb_width << mv_sample_log2) + 1; + * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; + * @endcode + */ + attribute_deprecated + int16_t (*motion_val[2])[2]; + + /** + * macroblock type table + * mb_type_base + mb_width + 2 + */ + attribute_deprecated + uint32_t *mb_type; + + /** + * DCT coefficients + */ + attribute_deprecated + short *dct_coeff; + + /** + * motion reference frame index + * the order in which these are stored can depend on the codec. + */ + attribute_deprecated + int8_t *ref_index[2]; +#endif + + /** + * for some private data of the user + */ + void *opaque; + + /** + * error + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int type; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. + */ + int palette_has_changed; + +#if FF_API_AVFRAME_LAVC + attribute_deprecated + int buffer_hints; + + /** + * Pan scan. + */ + attribute_deprecated + struct AVPanScan *pan_scan; +#endif + + /** + * reordered opaque 64bit (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + +#if FF_API_AVFRAME_LAVC + /** + * @deprecated this field is unused + */ + attribute_deprecated void *hwaccel_picture_private; + + attribute_deprecated + struct AVCodecContext *owner; + attribute_deprecated + void *thread_opaque; + + /** + * log2 of the size of the block which a single vector in motion_val represents: + * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) + */ + attribute_deprecated + uint8_t motion_subsample_log2; +#endif + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. 
For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. + */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * Code outside libavcodec should access this field using: + * av_frame_get_best_effort_timestamp(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_pos(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * Code outside libavcodec should access this field using: + * av_frame_get_pkt_duration(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * Code outside libavcodec should access this field using: + * av_frame_get_metadata(frame) + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * Code outside libavcodec should access this field using: + * av_frame_get_decode_error_flags(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * Code outside libavcodec should access this field using: + * av_frame_get_channels(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. It must be accessed using av_frame_get_pkt_size() and + * av_frame_set_pkt_size(). + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + + /** + * YUV colorspace type. + * It must be accessed using av_frame_get_colorspace() and + * av_frame_set_colorspace(). + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * It must be accessed using av_frame_get_color_range() and + * av_frame_set_color_range(). 
+ * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + + /** + * Not to be accessed directly from outside libavutil + */ + AVBufferRef *qp_table_buf; +} AVFrame; + +/** + * Accessors for some AVFrame fields. + * The position of these field in the structure is not part of the ABI, + * they should not be accessed directly outside libavcodec. + */ +int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); +void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_duration (const AVFrame *frame); +void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_pos (const AVFrame *frame); +void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +int64_t av_frame_get_channel_layout (const AVFrame *frame); +void av_frame_set_channel_layout (AVFrame *frame, int64_t val); +int av_frame_get_channels (const AVFrame *frame); +void av_frame_set_channels (AVFrame *frame, int val); +int av_frame_get_sample_rate (const AVFrame *frame); +void av_frame_set_sample_rate (AVFrame *frame, int val); +AVDictionary *av_frame_get_metadata (const AVFrame *frame); +void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +int av_frame_get_decode_error_flags (const AVFrame *frame); +void av_frame_set_decode_error_flags (AVFrame *frame, int val); +int av_frame_get_pkt_size(const AVFrame *frame); +void av_frame_set_pkt_size(AVFrame *frame, int val); +AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); +int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame); +void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); +enum AVColorRange av_frame_get_color_range(const AVFrame *frame); +void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. + * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Setup a new reference to the data described by a given frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. 
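+ *
+ * A minimal reuse sketch (illustrative only; decode_next() and process() are
+ * assumed caller-side helpers, error handling omitted):
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * while (decode_next(frame)) {    // fill the frame from a decoder
+ *     process(frame);             // consume the decoded data
+ *     av_frame_unref(frame);      // drop buffer references, keep the AVFrame
+ * }
+ * av_frame_free(&frame);
+ * @endcode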
+ */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everythnig contained in src to dst and reset src. + */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. + * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. + * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +#endif /* AVUTIL_FRAME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/hmac.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/hmac.h new file mode 100644 index 0000000..d36d4de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/hmac.h @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include + +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224 = 10, + AV_HMAC_SHA256, + AV_HMAC_SHA384, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. + */ +AVHMAC *av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. + * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/imgutils.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/imgutils.h new file mode 100644 index 0000000..ab32d66 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/imgutils.h @@ -0,0 +1,200 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. + * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. 
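+ *
+ * Typical call for a single 8-bit plane (the buffer pointers and strides are
+ * assumed to come from the caller):
+ * @code
+ * // copy a 640x480 luma plane between two differently padded buffers
+ * av_image_copy_plane(dst_y, dst_stride, src_y, src_stride, 640, 480);
+ * @endcode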
+ * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. + * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. + * + * @param[in] align the assumed linesize alignment + */ +int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. + * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesizes linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. 
+ * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat.h new file mode 100644 index 0000000..fe3d7ec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. + */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat_readwrite.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat_readwrite.h new file mode 100644 index 0000000..9709f4d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intfloat_readwrite.h @@ -0,0 +1,40 @@ +/* + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_READWRITE_H +#define AVUTIL_INTFLOAT_READWRITE_H + +#include +#include "attributes.h" + +/* IEEE 80 bits extended float */ +typedef struct AVExtFloat { + uint8_t exponent[2]; + uint8_t mantissa[8]; +} AVExtFloat; + +attribute_deprecated double av_int2dbl(int64_t v) av_const; +attribute_deprecated float av_int2flt(int32_t v) av_const; +attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const; +attribute_deprecated int64_t av_dbl2int(double d) av_const; +attribute_deprecated int32_t av_flt2int(float d) av_const; +attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const; + +#endif /* AVUTIL_INTFLOAT_READWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intreadwrite.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intreadwrite.h new file mode 100644 index 0000000..7ee6977 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/intreadwrite.h @@ -0,0 +1,621 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +#include "libavutil/avconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. 
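+ *
+ * For example, on any host the following reads a 32-bit big-endian field from
+ * a byte buffer and writes the value back little-endian (buf and out are
+ * assumed to point at enough valid bytes):
+ *
+ *     uint32_t v = AV_RB32(buf);   // big-endian read, host-order result
+ *     AV_WL32(out, v);             // little-endian write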
+ */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) +# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + +/* + * Define AV_[RW]N helper 
macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + 
((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + 
((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. + */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. + */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lfg.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lfg.h new file mode 100644 index 0000000..ec90562 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lfg.h @@ -0,0 +1,62 @@ +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. + * + * @param out array where the two generated numbers are placed + */ +void av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/log.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/log.h new file mode 100644 index 0000000..55459e8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/log.h @@ -0,0 +1,313 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_NB, ///< not part of ABI/API +}AVClassCategory; + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). + */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. + */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. + * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. + * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. 
+ * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. + */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param ap The arguments referenced by the format string. 
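+ *
+ * This default callback stays in effect unless av_log_set_callback() installs
+ * another one; a minimal setup sketch (stream_index is an assumed caller
+ * variable):
+ * @code
+ * av_log_set_level(AV_LOG_VERBOSE);
+ * av_log(NULL, AV_LOG_INFO, "opened stream %d\n", stream_index);
+ * @endcode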
+ */ +void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* av_default_item_name(void* ctx); +AVClassCategory av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formated line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * av_dlog macros + * Useful to print debug messages that shouldn't get compiled in normally. + */ + +#ifdef DEBUG +# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif + +/** + * Skip repeated messages, this requires the user app to use av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 +void av_log_set_flags(int arg); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lzo.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lzo.h new file mode 100644 index 0000000..c034039 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. 
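+ *
+ * Call sketch (in_buf/out_buf and their padded sizes are assumed to be set up
+ * by the caller as described below):
+ * @code
+ * int outlen = out_size, inlen = in_size;
+ * int err = av_lzo1x_decode(out_buf, &outlen, in_buf, &inlen);
+ * if (err)                        // any combination of the AV_LZO_* flags
+ *     handle_error(err);          // assumed caller-side error handler
+ * @endcode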
+ * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. + */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mathematics.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mathematics.h new file mode 100644 index 0000000..71f0392 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mathematics.h @@ -0,0 +1,147 @@ +/* + * copyright (c) 2005-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include +#include +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * @{ + */ + + +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE +}; + +/** + * Return the greatest common divisor of a and b. + * If both a and b are 0 or either or both are <0 then behavior is + * undefined. + */ +int64_t av_const av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. + * A simple a*b/c isn't possible as it can overflow. + */ +int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. 
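+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * converting a 90 kHz presentation timestamp to milliseconds with
+ * round-to-nearest.
+ * @code
+ * int64_t pts_90k = 123456;                       // assumed input timestamp
+ * int64_t pts_ms  = av_rescale_rnd(pts_90k, 1000, 90000, AV_ROUND_NEAR_INF);
+ * @endcode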
+ * A simple a*b/c isn't possible as it can overflow. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding) av_const; + +/** + * Compare 2 timestamps each in its own timebases. + * The result of the function is undefined if one of the timestamps + * is outside the int64_t range when represented in the others timebase. + * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position + */ +int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare 2 integers modulo mod. + * That is we compare integers a and b for which only the least + * significant log2(mod) bits are known. + * + * @param mod must be a power of 2 + * @return a negative value if a is smaller than b + * a positive value if a is greater than b + * 0 if a equals b + */ +int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. + * + * @param in_ts Input timestamp + * @param in_tb Input timesbase + * @param fs_tb Duration and *last timebase + * @param duration duration till the next call + * @param out_tb Output timesbase + */ +int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/md5.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/md5.h new file mode 100644 index 0000000..79702c8 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/md5.h @@ -0,0 +1,81 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size av_md5_size) + */ +void av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. 
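+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * incremental hashing with the alloc/init/update/final sequence declared in
+ * this header; buf1/len1 and buf2/len2 are assumed caller-provided data.
+ * @code
+ * uint8_t digest[16];
+ * struct AVMD5 *md5 = av_md5_alloc();
+ * if (!md5)
+ *     return AVERROR(ENOMEM);
+ * av_md5_init(md5);
+ * av_md5_update(md5, buf1, len1);                 // assumed input buffers
+ * av_md5_update(md5, buf2, len2);
+ * av_md5_final(md5, digest);
+ * av_free(md5);
+ * @endcode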
+ * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. + * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mem.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mem.h new file mode 100644 index 0000000..77b7adc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/mem.h @@ -0,0 +1,342 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include +#include + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * @{ + */ + + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__TI_COMPILER_VERSION__) + #define DECLARE_ALIGNED(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + t __attribute__((aligned(n))) v + #define DECLARE_ASM_CONST(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + static const t __attribute__((aligned(n))) v +#elif defined(__GNUC__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + #define av_malloc_attrib +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU). + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. 
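+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * allocating an aligned scratch buffer and releasing it with av_freep() so
+ * the pointer is also reset.
+ * @code
+ * uint8_t *buf = av_malloc(4096);
+ * if (!buf)
+ *     return AVERROR(ENOMEM);
+ * // ... use buf ...
+ * av_freep(&buf);                                 // frees and sets buf to NULL
+ * @endcode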
+ * @see av_mallocz() + */ +void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of size * nmemb bytes with av_malloc(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_malloc() + */ +av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_malloc(nmemb * size); +} + +/** + * Allocate or reallocate a block of memory. + * If ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param size Size in bytes of the memory block to be allocated or + * reallocated. + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + * @see av_fast_realloc() + */ +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate or reallocate a block of memory. + * This function does the same thing as av_realloc, except: + * - It takes two arguments and checks the result of the multiplication for + * integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic "buf = realloc(buf); if (!buf) return -1;". + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate or reallocate a block of memory. + * If *ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param size Size in bytes for the memory block to be allocated or + * reallocated + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_reallocp(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +int av_reallocp(void *ptr, size_t size); + +/** + * Allocate or reallocate an array. + * If ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). 
The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate or reallocate an array through a pointer to a pointer. + * If *ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc(). + * @param ptr Pointer to the memory block which should be freed. + * @note ptr = NULL is explicitly allowed. + * @note It is recommended that you use av_freep() instead. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if it cannot be allocated. + * @see av_malloc() + */ +void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of nmemb * size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * The allocation will fail if nmemb * size is greater than or equal + * to INT_MAX. + * @param nmemb + * @param size + * @return Pointer to the allocated block, NULL if it cannot be allocated. + */ +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate a block of size * nmemb bytes with av_mallocz(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_mallocz() + * @see av_malloc_array() + */ +av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_mallocz(nmemb * size); +} + +/** + * Duplicate the string s. + * @param s string to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of s or NULL if the string cannot be allocated. + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate the buffer p. + * @param p buffer to be duplicated + * @return Pointer to a newly allocated buffer containing a + * copy of p or NULL if the buffer cannot be allocated. 
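+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * duplicating a C string and a raw buffer (src/src_size are assumed
+ * caller-provided); both copies must later be released with av_free() or
+ * av_freep().
+ * @code
+ * char    *name = av_strdup("limelight");
+ * uint8_t *copy = av_memdup(src, src_size);       // assumed source buffer
+ * if (!name || !copy) {
+ *     av_freep(&name);
+ *     av_freep(&copy);
+ *     return AVERROR(ENOMEM);
+ * }
+ * @endcode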
+ */ +void *av_memdup(const void *p, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc() and set the pointer pointing to it to NULL. + * @param ptr Pointer to the pointer to the memory block which should + * be freed. + * @see av_free() + */ +void av_freep(void *ptr); + +/** + * Add an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem element to add + * @see av_dynarray2_add() + */ +void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size elem_size to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem_size size in bytes of the elements in the array + * @param elem_data pointer to the data of the element to add. If NULL, the space of + * the new added element is not filled. + * @return pointer to the data of the element to copy in the new allocated space. + * If NULL, the new allocated space is left uninitialized." + * @see av_dynarray_add() + */ +void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * Multiply two size_t values checking for overflow. + * @return 0 if success, AVERROR(EINVAL) if overflow. + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: only try the division if nelem and elsize + * are both greater than sqrt(SIZE_MAX). */ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may me allocated in one block. + */ +void av_max_alloc(size_t max); + +/** + * deliberately overlapping memcpy implementation + * @param dst destination buffer + * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 + * @param cnt number of bytes to copy, must be >= 0 + * + * cnt > back is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of back. 
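+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * filling a buffer with a repeating 4-byte pattern by copying the buffer over
+ * itself with back == 4.
+ * @code
+ * uint8_t buf[64] = { 0xDE, 0xAD, 0xBE, 0xEF };   // first period of the pattern
+ * av_memcpy_backptr(buf + 4, 4, 60);              // remaining 60 bytes repeat it
+ * @endcode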
+ */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/murmur3.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/murmur3.h new file mode 100644 index 0000000..f29ed97 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/murmur3.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include + +struct AVMurMur3 *av_murmur3_alloc(void); +void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); +void av_murmur3_init(struct AVMurMur3 *c); +void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/old_pix_fmts.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/old_pix_fmts.h new file mode 100644 index 0000000..3ee8aec --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/old_pix_fmts.h @@ -0,0 +1,175 @@ +/* + * copyright (c) 2006-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OLD_PIX_FMTS_H +#define AVUTIL_OLD_PIX_FMTS_H + +/* + * This header exists to prevent new pixel formats from being accidentally added + * to the deprecated list. + * Do not include it directly. It will be removed on next major bump + * + * Do not add new items to this list. Use the AVPixelFormat enum instead. + */ + PIX_FMT_NONE = AV_PIX_FMT_NONE, + PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + PIX_FMT_GRAY8, ///< Y , 8bpp + PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + PIX_FMT_XVMC_MPEG2_IDCT, + PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
+ + PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), 
little-endian + PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus + //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately + //is better + PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian + PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian + PIX_FMT_GBRP10BE, ///< planar GBR 
4:4:4 30bpp, big endian + PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian + PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian + PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian + PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian + PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian + PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian + + PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +#endif /* AVUTIL_OLD_PIX_FMTS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/opt.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/opt.h new file mode 100644 index 0000000..14faa6e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/opt.h @@ -0,0 +1,757 @@ +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. + * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, av_opt_set_defaults() can be called to + * initialize defaults. After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the av_opt_free() function to automatically + * free all the allocated string and binary options. 
+ * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = av_malloc(sizeof(*ret)); + * ret->class = &test_class; + * av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * av_opt_free(*foo); + * av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. 
AVCodecContext in libavcodec or + * AVFormatContext in libavformat. + * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are av_opt_next(), which iterates + * over all options defined for one object, and av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. In that case you should call av_opt_child_next() recursively (and + * av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to av_opt_set() is enough. For + * non-string type options, av_opt_set() will parse the string according to the + * option type. + * + * Similarly av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. This allows to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. + */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_CONST = 128, + AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), + AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), + AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational + AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), + AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), +#if FF_API_OLD_AVOPTIONS + FF_OPT_TYPE_FLAGS = 0, + FF_OPT_TYPE_INT, + FF_OPT_TYPE_INT64, + FF_OPT_TYPE_DOUBLE, + FF_OPT_TYPE_FLOAT, + FF_OPT_TYPE_STRING, + FF_OPT_TYPE_RATIONAL, + FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + FF_OPT_TYPE_CONST=128, +#endif +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. 
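+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * reading and writing an option by name as described in the usage section
+ * above; obj is assumed to be an AVOptions-enabled struct such as the
+ * test_struct example, and "test_int" the option declared for it.
+ * @code
+ * uint8_t *val = NULL;
+ * av_opt_set(obj, "test_int", "42", 0);           // parsed according to the option type
+ * av_opt_get(obj, "test_int", 0, &val);           // val is av_malloc()ed
+ * av_free(val);
+ * @endcode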
+ */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + double value_min, value_max; ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count + double component_min, component_max; ///< For string this represents the unicode range for chars, 0-127 limits to ASCII + int is_range; ///< if set to 1 the struct encodes a range, if set to 0 a single value +} AVOptionRange; + +/** + * List of AVOptionRange structs + */ +typedef struct AVOptionRanges { + AVOptionRange **range; + int nb_ranges; +} AVOptionRanges; + + +#if FF_API_FIND_OPT +/** + * Look for an option in obj. Look only for the options which + * have the flags set as specified in mask and flags (that is, + * for which it is the case that (opt->flags & mask) == flags). + * + * @param[in] obj a pointer to a struct whose first element is a + * pointer to an AVClass + * @param[in] name the name of the option to look for + * @param[in] unit the unit of the option to look for, or any if NULL + * @return a pointer to the option found, or NULL if no option + * has been found + * + * @deprecated use av_opt_find. + */ +attribute_deprecated +const AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags); +#endif + +#if FF_API_OLD_AVOPTIONS +/** + * Set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an + * AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. If the field is not of a string + * type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. 
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption + * found + * @param alloc this parameter is currently ignored + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + * @deprecated use av_opt_set() + */ +attribute_deprecated +int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out); + +attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n); +attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n); +attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n); + +double av_get_double(void *obj, const char *name, const AVOption **o_out); +AVRational av_get_q(void *obj, const char *name, const AVOption **o_out); +int64_t av_get_int(void *obj, const char *name, const AVOption **o_out); +attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len); +attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last); +#endif + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void av_opt_set_defaults(void *s); + +#if FF_API_OLD_AVOPTIONS +attribute_deprecated +void av_opt_set_defaults2(void *s, int mask, int flags); +#endif + +/** + * Parse the key/value pairs list in opts. For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + */ +int av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. 
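+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * parsing a shorthand option string; ctx is assumed to be an AVOptions-enabled
+ * struct that declares "width", "height" and "pix_fmt" options.
+ * @code
+ * static const char *const shorthand[] = { "width", "height", NULL };
+ * int ret = av_opt_set_from_string(ctx, "640:480:pix_fmt=yuv420p",
+ *                                  shorthand, "=", ":");
+ * if (ret < 0)
+ *     return ret;                                 // on success ret is the pair count
+ * @endcode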
+ * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all string and binary options in obj. + */ +void av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see av_dict_copy() + */ +int av_opt_set_dict(void *obj, struct AVDictionary **options); + +/** + * Extract a key-value pair from the beginning of a string. + * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using av_free() + * @param rval parsed value; must be freed using av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. 
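+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * walking an options string with av_opt_get_key_value(); with
+ * AV_OPT_FLAG_IMPLICIT_KEY a bare value such as "fast" below is returned with
+ * key == NULL.
+ * @code
+ * const char *p = "fast:crf=23";                  // assumed input string
+ * while (*p) {
+ *     char *key = NULL, *val = NULL;
+ *     if (av_opt_get_key_value(&p, "=", ":", AV_OPT_FLAG_IMPLICIT_KEY,
+ *                              &key, &val) < 0)
+ *         break;
+ *     av_free(key);                               // key is NULL for "fast"
+ *     av_free(val);
+ *     if (*p)
+ *         p++;                                    // skip the pairs separator
+ * }
+ * @endcode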
+ */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. + */ +int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the + given object first. */ +/** + * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ 0x0002 + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with av_set_string3(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. 
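+ *
+ * Illustrative sketch (editorial addition, not part of the upstream header):
+ * checking whether a class declares an option without allocating an instance;
+ * avcodec_get_class() is assumed here only as a convenient source of an
+ * AVClass pointer.
+ * @code
+ * const AVClass *cls = avcodec_get_class();       // assumed external helper
+ * if (av_opt_find(&cls, "b", NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)) {
+ *     // the option exists on AVCodecContext
+ * }
+ * @endcode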
+ * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. + * @param prev result of the previous call to av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *av_opt_next(void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. + * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. + * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int av_opt_set_double(void *obj, const char *name, double val, int search_flags); +int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); + +/** + * Set a binary option to an integer list. 
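/*
 * Usage sketch (illustration only) for av_opt_next(): enumerating every
 * AVOption exposed by an AVOptions-enabled struct. "obj" is assumed to be a
 * valid pointer to a struct whose first member is an AVClass pointer, for
 * example an allocated AVCodecContext.
 */
#include <stdio.h>
#include "libavutil/opt.h"

static void list_options(void *obj)
{
    const AVOption *opt = NULL;

    /* Feed the previous result back in until NULL marks the end. */
    while ((opt = av_opt_next(obj, opt)))
        printf("%-24s (unit: %s)\n", opt->name, opt->unit ? opt->unit : "-");
}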
+ * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \ + AVERROR(EINVAL) : \ + av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller + */ +int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val); +int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_freep_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. 
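/*
 * Usage sketch for the setter/getter pairs above; "ctx" is assumed to be an
 * AVOptions-enabled object allocated elsewhere (e.g. an AVCodecContext from
 * avcodec_alloc_context3()), and the string returned by av_opt_get() must be
 * released with av_free().
 */
#include <stdio.h>
#include "libavutil/mem.h"
#include "libavutil/opt.h"

static int tweak_bitrate(void *ctx)
{
    int64_t  br  = 0;
    uint8_t *str = NULL;
    int ret;

    /* The string form is parsed; SI postfixes are allowed ("2M" -> 2000000). */
    if ((ret = av_opt_set(ctx, "b", "2M", 0)) < 0)
        return ret;

    /* Read the same option back, once as an integer and once as a string. */
    if ((ret = av_opt_get_int(ctx, "b", 0, &br)) < 0)
        return ret;
    if ((ret = av_opt_get(ctx, "b", 0, &str)) < 0)
        return ret;

    printf("bitrate: %lld (\"%s\")\n", (long long)br, (char *)str);
    av_free(str);
    return 0;
}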
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * + * The result must be freed with av_opt_free_ranges. + * + * @return >= 0 on success, a negative errro code otherwise + */ +int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/parseutils.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/parseutils.h new file mode 100644 index 0000000..c80f0de --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/parseutils.h @@ -0,0 +1,187 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. + * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. 
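/*
 * Usage sketch for av_parse_ratio() and av_parse_video_size(); the input
 * strings and the 255 numerator/denominator bound are arbitrary examples.
 */
#include <stdio.h>
#include "libavutil/parseutils.h"

static void parse_geometry_example(void)
{
    int w = 0, h = 0;
    AVRational dar = { 0, 0 };

    if (av_parse_video_size(&w, &h, "hd720") >= 0)   /* size abbreviation */
        printf("size: %dx%d\n", w, h);               /* -> 1280x720       */

    if (av_parse_ratio(&dar, "16:9", 255, 0, NULL) >= 0)
        printf("ratio: %d/%d\n", dar.num, dar.den);
}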
+ * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. + * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. + + * @param timestr a string representing a date or a duration. + * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). 
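/*
 * Usage sketch for av_parse_color() and av_parse_time(); the color and
 * duration strings are illustrative only. rgba[] receives one byte per
 * component, and slen = -1 means the color string is NUL-terminated.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "libavutil/parseutils.h"

static void parse_misc_example(void)
{
    uint8_t rgba[4];
    int64_t us = 0;

    /* "red@0.5": a named color with 50% opacity, as described above. */
    if (av_parse_color(rgba, "red@0.5", -1, NULL) >= 0)
        printf("R=%u G=%u B=%u A=%u\n", rgba[0], rgba[1], rgba[2], rgba[3]);

    /* Non-zero third argument: parse as a duration ("01:30" -> 90 s). */
    if (av_parse_time(&us, "01:30", 1) >= 0)
        printf("duration: %" PRId64 " microseconds\n", us);
}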
+ * + * In particular it actually supports the parameters: + * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this + * function call, or NULL in case the function fails to match all of + * the fmt string and therefore an error occurred + */ +char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixdesc.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixdesc.h new file mode 100644 index 0000000..e88bf9b --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixdesc.h @@ -0,0 +1,291 @@ +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include + +#include "attributes.h" +#include "pixfmt.h" + +typedef struct AVComponentDescriptor{ + uint16_t plane :2; ///< which of the 4 planes contains the component + + /** + * Number of elements between 2 horizontally consecutive pixels minus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t step_minus1 :3; + + /** + * Number of elements before the component of the first pixel plus 1. + * Elements are bits for bitstream formats, bytes otherwise. + */ + uint16_t offset_plus1 :3; + uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value + uint16_t depth_minus1 :4; ///< number of bits in the component minus 1 +}AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. 
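/*
 * Usage sketch for av_small_strptime() and av_timegm(); the date string and
 * format are examples only and use just the specifiers listed above.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include "libavutil/parseutils.h"

static void parse_date_example(void)
{
    struct tm dt;
    memset(&dt, 0, sizeof(dt));

    if (av_small_strptime("2014-01-28 12:34:56", "%Y-%m-%d %H:%M:%S", &dt)) {
        /* dt now holds broken-down UTC time; convert it to a time_t. */
        time_t t = av_timegm(&dt);
        printf("epoch seconds: %lld\n", (long long)t);
    }
}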
+ * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. + */ +typedef struct AVPixFmtDescriptor{ + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = -((-luma_width) >> log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w) + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= -((-luma_height) >> log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + uint8_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 2 or 4 components, then alpha is last. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components, + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + */ + AVComponentDescriptor comp[4]; +}AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. + */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). + */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) +/** + * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as + * paletted internally, but the palette is generated by the decoder and is not + * stored in the file. + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) +/** + * The pixel format has an alpha channel. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +#if FF_API_PIX_FMT +/** + * @deprecated use the AV_PIX_FMT_FLAG_* flags + */ +#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE +#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL +#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM +#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL +#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR +#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB +#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL +#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA +#endif + +#if FF_API_PIX_FMT_DESC +/** + * The array of all the pixel format descriptors. + */ +extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[]; +#endif + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. 
+ * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. + */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, int x, int y, int c, int w); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt); + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. 
+ */ +const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * See avcodec_get_chroma_sub_sample() for a function that asserts a + * valid pixel format instead of returning an error code. + * Its recommanded that you use avcodec_get_chroma_sub_sample unless + * you do check the return code! + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +void ff_check_pixfmt_descriptors(void); + +/** + * Utility function to swap the endianness of a pixel format. + * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixfmt.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixfmt.h new file mode 100644 index 0000000..7b17a4f --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/pixfmt.h @@ -0,0 +1,397 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + * + */ + +#include "libavutil/avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. + * + * @par + * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. 
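/*
 * Usage sketch tying the descriptor API above together; "yuv420p" is an
 * arbitrary format name and the output is purely informational.
 */
#include <stdio.h>
#include "libavutil/pixdesc.h"

static void inspect_pix_fmt(const char *name)
{
    enum AVPixelFormat fmt = av_get_pix_fmt(name);          /* e.g. "yuv420p" */
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
    int h_shift = 0, v_shift = 0;

    if (!desc)
        return;

    av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift);
    printf("%s: %d bpp, %d plane(s), chroma shift %d/%d, %s\n",
           av_get_pix_fmt_name(fmt),
           av_get_bits_per_pixel(desc),
           av_pix_fmt_count_planes(fmt),
           h_shift, v_shift,
           (desc->flags & AV_PIX_FMT_FLAG_PLANAR) ? "planar" : "packed");
}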
The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + * + * @note + * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1 + * and that all newly added little-endian formats have (pix_fmt & 1) == 0. + * This allows simpler detection of big vs little-endian. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... + AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range + AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + AV_PIX_FMT_XVMC_MPEG2_IDCT, + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... 
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), 
little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + +#ifdef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is 
stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + + /** + * duplicated pixel formats for compatibility with libav. + * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55) + * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85) + */ + AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr 
& Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + +#ifndef AV_PIX_FMT_ABI_GIT_MASTER + AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian +#endif + AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, 
GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ + + AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions + +#if FF_API_PIX_FMT +#include "old_pix_fmts.h" +#endif +}; + +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV +#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV +#endif + + +#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A +#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGRA64 
AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) + +#if FF_API_PIX_FMT +#define PixelFormat AVPixelFormat + +#define PIX_FMT_Y400A AV_PIX_FMT_Y400A +#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P + +#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le) + +#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 +#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1 +#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32 +#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1 +#define PIX_FMT_0RGB32 AV_PIX_FMT_0RGB32 +#define PIX_FMT_0BGR32 AV_PIX_FMT_0BGR32 + +#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16 +#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48 +#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565 +#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555 +#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444 +#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48 +#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565 +#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555 +#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444 + +#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9 +#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9 +#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9 +#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10 +#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10 +#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10 +#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12 +#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12 +#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12 +#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14 +#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14 +#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14 +#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16 +#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16 +#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16 + +#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64 +#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64 +#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9 +#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10 +#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12 +#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14 +#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16 +#endif + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/random_seed.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/random_seed.h new file mode 100644 index 0000000..0462a04 --- 
/dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/random_seed.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2009 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. + * Its possible to call this function multiple times if more bits are needed. + * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/rational.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/rational.h new file mode 100644 index 0000000..b9800ee --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/rational.h @@ -0,0 +1,155 @@ +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * rational numbers + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include +#include +#include "attributes.h" + +/** + * @addtogroup lavu_math + * @{ + */ + +/** + * rational number numerator/denominator + */ +typedef struct AVRational{ + int num; ///< numerator + int den; ///< denominator +} AVRational; + +/** + * Compare two rationals. + * @param a first rational + * @param b second rational + * @return 0 if a==b, 1 if a>b, -1 if a>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert rational to double. + * @param a rational to convert + * @return (double) a + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * This is useful for framerate calculations. 
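/*
 * Usage sketch for av_get_random_seed(): as documented above it is meant to
 * seed a faster PRNG rather than be called per random value. Seeding the C
 * library rand() here is only an illustration of that pattern.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "libavutil/random_seed.h"

static void seed_prng_example(void)
{
    uint32_t seed = av_get_random_seed();  /* potentially slow, call once */
    srand(seed);
    printf("seed: %u, first value: %d\n", (unsigned)seed, rand());
}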
+ * @param dst_num destination numerator + * @param dst_den destination denominator + * @param num source numerator + * @param den source denominator + * @param max the maximum allowed for dst_num & dst_den + * @return 1 if exact, 0 otherwise + */ +int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b first rational + * @param c second rational + * @return b*c + */ +AVRational av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b first rational + * @param c second rational + * @return b/c + */ +AVRational av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b first rational + * @param c second rational + * @return b+c + */ +AVRational av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b first rational + * @param c second rational + * @return b-c + */ +AVRational av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. + * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * inf is expressed as {1,0} or {-1,0} depending on the sign. + * + * @param d double to convert + * @param max the maximum allowed numerator and denominator + * @return (AVRational) d + */ +AVRational av_d2q(double d, int max) av_const; + +/** + * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer + * than q1, 0 if they have the same distance. + */ +int av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the nearest value in q_list to q. + * @param q_list an array of rationals terminated by {0, 0} + * @return the index of the nearest value found in the array + */ +int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/ripemd.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/ripemd.h new file mode 100644 index 0000000..7b0c8bc --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/ripemd.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. 
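/*
 * Usage sketch for the rational helpers above: scaling a duration expressed
 * in a 1/25 time base and approximating a float rate. Values are arbitrary.
 */
#include <stdio.h>
#include "libavutil/rational.h"

static void rational_example(void)
{
    AVRational tb    = { 1, 25 };            /* 25 fps time base   */
    AVRational three = { 3, 1 };
    AVRational dur   = av_mul_q(three, tb);  /* 3 ticks -> 3/25 s  */

    printf("duration: %d/%d = %f s\n", dur.num, dur.den, av_q2d(dur));

    /* Turn a floating-point rate back into an exact rational. */
    AVRational approx = av_d2q(29.97, 100000);
    printf("29.97 ~ %d/%d\n", approx.num, approx.den);
}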
+ * + * @param context pointer to the function context (of size av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/samplefmt.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/samplefmt.h new file mode 100644 index 0000000..db17d43 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/samplefmt.h @@ -0,0 +1,256 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include + +#include "avutil.h" +#include "attributes.h" + +/** + * Audio Sample Formats + * + * @par + * The data described by the sample format is always in native-endian order. + * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * @par + * The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * @par + * The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. 
DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +#if FF_API_GET_BITS_PER_SAMPLE_FMT +/** + * @deprecated Use av_get_bytes_per_sample() instead. + */ +attribute_deprecated +int av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt); +#endif + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. + * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. 
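/*
 * Illustrative sketch, not part of the vendored samplefmt.h above: querying the
 * sample-format helpers declared so far.  Channel and sample counts are arbitrary.
 */
#include <stdio.h>
#include "libavutil/samplefmt.h"

static void samplefmt_demo(void)
{
    enum AVSampleFormat fmt = av_get_sample_fmt("fltp");   /* planar float */
    int linesize;

    printf("%s: planar=%d, %d bytes/sample, packed twin=%s\n",
           av_get_sample_fmt_name(fmt),
           av_sample_fmt_is_planar(fmt),
           av_get_bytes_per_sample(fmt),
           av_get_sample_fmt_name(av_get_packed_sample_fmt(fmt)));

    /* minimum buffer size for 2 channels x 1024 samples, default alignment */
    int size = av_samples_get_buffer_size(&linesize, 2, 1024, fmt, 0);
    printf("buffer=%d bytes, linesize=%d\n", size, linesize);
}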
+ * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see av_samples_fill_arrays() + * @see av_samples_alloc_array_and_samples() + */ +int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. + * + * This is the same as av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see av_samples_alloc() + */ +int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. 
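/*
 * Illustrative sketch, not part of the vendored samplefmt.h above: allocating a
 * silence-initialized planar buffer with av_samples_alloc() and releasing it the
 * way the documentation above describes.  av_freep() comes from libavutil/mem.h.
 */
#include <stdint.h>
#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"

static int samples_alloc_demo(void)
{
    uint8_t *data[8];          /* one pointer per plane; 8 covers up to 7.1 */
    int linesize;
    int ret = av_samples_alloc(data, &linesize, 2 /*channels*/, 1024 /*samples*/,
                               AV_SAMPLE_FMT_FLTP, 0 /*default alignment*/);
    if (ret < 0)
        return ret;

    /* ... fill or consume data[0] and data[1] here ... */

    av_freep(&data[0]);        /* frees the sample buffer behind every plane */
    return 0;
}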
+ * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. + * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha.h new file mode 100644 index 0000000..bf4377e --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. + * + * @param context pointer to the function context (of size av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. 
+ * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha512.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha512.h new file mode 100644 index 0000000..7b08701 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/sha512.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA512 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/time.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/time.h new file mode 100644 index 0000000..90eb436 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/time.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
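/*
 * Illustrative sketch, not part of the vendored sha.h/sha512.h above: computing
 * a SHA-256 digest with the context API they declare.  av_free() comes from
 * libavutil/mem.h; a 256-bit digest is 32 bytes.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "libavutil/mem.h"
#include "libavutil/sha.h"

static void sha256_demo(const char *msg)
{
    struct AVSHA *sha = av_sha_alloc();
    uint8_t digest[32];

    if (!sha)
        return;
    if (av_sha_init(sha, 256) == 0) {
        av_sha_update(sha, (const uint8_t *)msg, (unsigned)strlen(msg));
        av_sha_final(sha, digest);
        for (int i = 0; i < 32; i++)
            printf("%02x", digest[i]);
        printf("\n");
    }
    av_free(sha);
}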
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include + +/** + * Get the current time in microseconds. + */ +int64_t av_gettime(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timecode.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timecode.h new file mode 100644 index 0000000..56e3975 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timecode.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 16 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. 
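/*
 * Illustrative sketch, not part of the vendored time.h above: timing a sleep with
 * av_gettime() and av_usleep().  As the header notes, the sleep is rounded to the
 * system timer's precision, so the measured value is only approximate.
 */
#include <stdint.h>
#include <stdio.h>
#include "libavutil/time.h"

static void sleep_demo(void)
{
    int64_t start = av_gettime();          /* microseconds */
    av_usleep(250000);                     /* request roughly 250 ms */
    printf("slept for about %lld us\n", (long long)(av_gettime() - start));
}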
+ */ +uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). + * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for av_log). + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timestamp.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timestamp.h new file mode 100644 index 0000000..f63a08c --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/timestamp.h @@ -0,0 +1,74 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
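/*
 * Illustrative sketch, not part of the vendored timecode.h above: initializing an
 * AVTimecode for 25 fps and printing the first frames as hh:mm:ss:ff strings with
 * the functions declared above.  A NULL log context simply disables contextual
 * av_log reporting.
 */
#include <stdio.h>
#include "libavutil/timecode.h"

static void timecode_demo(void)
{
    AVTimecode tc;
    char buf[AV_TIMECODE_STR_SIZE];

    if (av_timecode_init(&tc, (AVRational){25, 1}, 0 /*flags*/, 0 /*start*/, NULL) < 0)
        return;

    for (int frame = 0; frame < 3; frame++)
        printf("%s\n", av_timecode_make_string(&tc, buf, frame));
}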
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/version.h new file mode 100644 index 0000000..dd70beb --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/version.h @@ -0,0 +1,153 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +/** + * @defgroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +#define AV_PRAGMA(s) _Pragma(#s) + +/** + * @} + */ + +/** + * @defgroup version_utils Library Version Macros + * + * Useful to check and match library version in order to maintain + * backward compatibility. 
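/*
 * Illustrative sketch, not part of the vendored timestamp.h above: the usual
 * logging pattern built on the av_ts2str()/av_ts2timestr() macros.  The 1/90000
 * time base is just an example value.
 */
#include <stdint.h>
#include <stdio.h>
#include "libavutil/rational.h"
#include "libavutil/timestamp.h"

static void ts_demo(int64_t pts)
{
    AVRational tb = { 1, 90000 };   /* e.g. an MPEG-TS style time base */

    /* each macro expands to a temporary char[AV_TS_MAX_STRING_SIZE] buffer, so
       the result must be consumed immediately, as the header warns */
    printf("pts:%s pts_time:%s\n", av_ts2str(pts), av_ts2timestr(pts, &tb));
}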
+ * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * @} + */ + + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 52 +#define LIBAVUTIL_VERSION_MINOR 48 +#define LIBAVUTIL_VERSION_MICRO 101 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @} + * + * @defgroup depr_guards Deprecation guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @{ + */ + +#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT +#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_FIND_OPT +#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_OLD_AVOPTIONS +#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT +#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CONTEXT_SIZE +#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_PIX_FMT_DESC +#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AV_REVERSE +#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AUDIOCONVERT +#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_CPU_FLAG_MMX2 +#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_SAMPLES_UTILS_RETURN_ZERO +#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS_PRIVATE +#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_LLS1 +#define FF_API_LLS1 (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_AVFRAME_LAVC +#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 53) +#endif +#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT +#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 53) +#endif + +/** + * @} + */ + +#endif /* AVUTIL_VERSION_H */ + diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/xtea.h b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/xtea.h new file mode 100644 index 0000000..0899c92 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libavutil/xtea.h @@ -0,0 +1,62 @@ +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
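/*
 * Illustrative sketch, not part of the vendored version.h above: the compile-time
 * check pattern built on AV_VERSION_INT/LIBAVUTIL_VERSION_INT, mirroring the
 * FF_API_* deprecation guards defined above.  HAVE_RECENT_LAVU is a hypothetical
 * name used only for this example.
 */
#include "libavutil/version.h"

#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(52, 48, 0)
#define HAVE_RECENT_LAVU 1
#else
#define HAVE_RECENT_LAVU 0
#endif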
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include + +/** + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption + */ +void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/swresample.h b/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/swresample.h new file mode 100644 index 0000000..3811301 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/swresample.h @@ -0,0 +1,311 @@ +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr Libswresample + * @{ + * + * Libswresample (lswr) is a library that handles audio resampling, sample + * format conversion and mixing. + * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. 
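/*
 * Illustrative sketch, not part of the vendored xtea.h above: one 8-byte block
 * encrypted and decrypted in ECB mode (iv == NULL) with the functions declared
 * above.  Key and plaintext bytes are arbitrary.
 */
#include <stdint.h>
#include <string.h>
#include "libavutil/xtea.h"

static int xtea_demo(void)
{
    static const uint8_t key[16] = { 0, 1, 2,  3,  4,  5,  6,  7,
                                     8, 9, 10, 11, 12, 13, 14, 15 };
    uint8_t plain[8] = "ABCDEFG";          /* 7 chars + NUL = one 8-byte block */
    uint8_t enc[8], dec[8];
    AVXTEA ctx;

    av_xtea_init(&ctx, key);
    av_xtea_crypt(&ctx, enc, plain, 1 /*block*/, NULL /*ECB*/, 0 /*encrypt*/);
    av_xtea_crypt(&ctx, dec, enc,   1,           NULL,         1 /*decrypt*/);

    return memcmp(plain, dec, sizeof(plain)) == 0;   /* 1 when the round trip matches */
}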
+ * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix): + * @code + * SwrContext *swr = swr_alloc(); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * Once all values have been set, it must be initialized with swr_init(). If + * you need to change the conversion parameters, you can change the parameters + * as described above, or by using swr_alloc_set_opts(), then call swr_init() + * again. + * + * The conversion itself is done by repeatedly calling swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * swr_convert() with NULL in and 0 in_count. + * + * The delay between input and output, can at any time be found by using + * swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with swr_free(). + * There will be no memory leak if the data is not completely flushed before + * swr_free(). + */ + +#include +#include "libavutil/samplefmt.h" + +#include "libswresample/version.h" + +#if LIBSWRESAMPLE_VERSION_MAJOR < 1 +#define SWR_CH_MAX 32 ///< Maximum number of channels +#endif + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? 
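/*
 * Illustrative sketch, not part of the vendored swresample.h: the end-of-stream
 * flush described in the introduction above -- keep calling swr_convert() with a
 * NULL input until no more samples come out.  swr_convert() itself is declared
 * further down in this header; 'out' is assumed to be caller-allocated with room
 * for 'max_out' samples per channel, and 'handle_output' is a caller-supplied
 * sink like the one in the header's own example.
 */
#include <stdint.h>
#include "libswresample/swresample.h"

static int flush_resampler(struct SwrContext *swr, uint8_t **out, int max_out,
                           int (*handle_output)(uint8_t **data, int nb_samples))
{
    int n;
    while ((n = swr_convert(swr, out, max_out, NULL, 0)) > 0) {
        int ret = handle_output(out, n);
        if (ret < 0)
            return ret;
    }
    return n;   /* 0 once drained, negative on conversion error */
}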
+ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */ +}; + +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for swrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *swr_get_class(void); + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with swr_alloc_set_opts()) before calling swr_init(). + * + * @see swr_alloc_set_opts(), swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * + * @return AVERROR error code in case of failure. + */ +int swr_init(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s Swr context, can be NULL + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). + * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int log_offset, void *log_ctx); + +/** + * Free the given SwrContext and set the pointer to NULL. + */ +void swr_free(struct SwrContext **s); + +/** + * Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space then the input will be buffered. + * You can avoid this buffering by providing more output space than input. + * Convertion will run directly without copying whenever possible. 
+ * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. + * First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers + * + * @param pts timestamp for the next input sample, INT64_MIN if unknown + * @return the output timestamp for the next output sample + */ +int64_t swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * Activate resampling compensation. + */ +int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. + * + * @param s allocated Swr context, not yet initialized + * @param channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return AVERROR error code in case of failure. + */ +int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Set a customized remix matrix. + * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return AVERROR error code in case of failure. + */ +int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * Drops the specified number of output samples. + */ +int swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. + */ +int swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. + * + * @param s swr context + * @param base timebase in which the returned delay will be + * if its set to 1 the returned delay is in seconds + * if its set to 1000 the returned delay is in milli seconds + * if its set to the input sample rate then the returned delay is in input samples + * if its set to the output sample rate then the returned delay is in output samples + * an exact rounding free delay can be found by using LCM(in_sample_rate, out_sample_rate) + * @returns the delay in 1/base units. 
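/*
 * Illustrative sketch, not part of the vendored swresample.h: the one-call setup
 * variant using swr_alloc_set_opts() declared above, converting 5.1 planar float
 * at 48 kHz to stereo S16 at 44.1 kHz.  The AV_CH_LAYOUT_* constants come from
 * libavutil/channel_layout.h.
 */
#include "libavutil/channel_layout.h"
#include "libswresample/swresample.h"

static struct SwrContext *make_downmixer(void)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16,  44100,   /* output side */
            AV_CH_LAYOUT_5POINT1, AV_SAMPLE_FMT_FLTP, 48000,   /* input side  */
            0, NULL);                                          /* log offset, log ctx */
    if (!swr)
        return NULL;
    if (swr_init(swr) < 0) {       /* must succeed before swr_convert() is used */
        swr_free(&swr);
        return NULL;
    }
    return swr;
}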
+ */ +int64_t swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Return the LIBSWRESAMPLE_VERSION_INT constant. + */ +unsigned swresample_version(void); + +/** + * Return the swr build-time configuration. + */ +const char *swresample_configuration(void); + +/** + * Return the swr license. + */ +const char *swresample_license(void); + +/** + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/version.h new file mode 100644 index 0000000..464c86d --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWR_VERSION_H +#define SWR_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 0 +#define LIBSWRESAMPLE_VERSION_MINOR 17 +#define LIBSWRESAMPLE_VERSION_MICRO 104 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWR_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/swscale.h b/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/swscale.h new file mode 100644 index 0000000..42702b7 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/swscale.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2001-2011 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_SWSCALE_H +#define SWSCALE_SWSCALE_H + +/** + * @file + * @ingroup lsws + * external API header + */ + +/** + * @defgroup lsws Libswscale + * @{ + */ + +#include + +#include "libavutil/avutil.h" +#include "libavutil/log.h" +#include "libavutil/pixfmt.h" +#include "version.h" + +/** + * Return the LIBSWSCALE_VERSION_INT constant. + */ +unsigned swscale_version(void); + +/** + * Return the libswscale build-time configuration. + */ +const char *swscale_configuration(void); + +/** + * Return the libswscale license. + */ +const char *swscale_license(void); + +/* values for the flags, the stuff on the command line is different */ +#define SWS_FAST_BILINEAR 1 +#define SWS_BILINEAR 2 +#define SWS_BICUBIC 4 +#define SWS_X 8 +#define SWS_POINT 0x10 +#define SWS_AREA 0x20 +#define SWS_BICUBLIN 0x40 +#define SWS_GAUSS 0x80 +#define SWS_SINC 0x100 +#define SWS_LANCZOS 0x200 +#define SWS_SPLINE 0x400 + +#define SWS_SRC_V_CHR_DROP_MASK 0x30000 +#define SWS_SRC_V_CHR_DROP_SHIFT 16 + +#define SWS_PARAM_DEFAULT 123456 + +#define SWS_PRINT_INFO 0x1000 + +//the following 3 flags are not completely implemented +//internal chrominace subsampling info +#define SWS_FULL_CHR_H_INT 0x2000 +//input subsampling info +#define SWS_FULL_CHR_H_INP 0x4000 +#define SWS_DIRECT_BGR 0x8000 +#define SWS_ACCURATE_RND 0x40000 +#define SWS_BITEXACT 0x80000 +#define SWS_ERROR_DIFFUSION 0x800000 + +#if FF_API_SWS_CPU_CAPS +/** + * CPU caps are autodetected now, those flags + * are only provided for API compatibility. + */ +#define SWS_CPU_CAPS_MMX 0x80000000 +#define SWS_CPU_CAPS_MMXEXT 0x20000000 +#define SWS_CPU_CAPS_MMX2 0x20000000 +#define SWS_CPU_CAPS_3DNOW 0x40000000 +#define SWS_CPU_CAPS_ALTIVEC 0x10000000 +#define SWS_CPU_CAPS_BFIN 0x01000000 +#define SWS_CPU_CAPS_SSE2 0x02000000 +#endif + +#define SWS_MAX_REDUCE_CUTOFF 0.002 + +#define SWS_CS_ITU709 1 +#define SWS_CS_FCC 4 +#define SWS_CS_ITU601 5 +#define SWS_CS_ITU624 5 +#define SWS_CS_SMPTE170M 5 +#define SWS_CS_SMPTE240M 7 +#define SWS_CS_DEFAULT 5 + +/** + * Return a pointer to yuv<->rgb coefficients for the given colorspace + * suitable for sws_setColorspaceDetails(). + * + * @param colorspace One of the SWS_CS_* macros. If invalid, + * SWS_CS_DEFAULT is used. + */ +const int *sws_getCoefficients(int colorspace); + +// when used for filters they must have an odd number of elements +// coeffs cannot be shared between vectors +typedef struct SwsVector { + double *coeff; ///< pointer to the list of coefficients + int length; ///< number of coefficients in the vector +} SwsVector; + +// vectors can be shared +typedef struct SwsFilter { + SwsVector *lumH; + SwsVector *lumV; + SwsVector *chrH; + SwsVector *chrV; +} SwsFilter; + +struct SwsContext; + +/** + * Return a positive value if pix_fmt is a supported input format, 0 + * otherwise. + */ +int sws_isSupportedInput(enum AVPixelFormat pix_fmt); + +/** + * Return a positive value if pix_fmt is a supported output format, 0 + * otherwise. + */ +int sws_isSupportedOutput(enum AVPixelFormat pix_fmt); + +/** + * @param[in] pix_fmt the pixel format + * @return a positive value if an endianness conversion for pix_fmt is + * supported, 0 otherwise. + */ +int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); + +/** + * Allocate an empty SwsContext. 
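/*
 * Illustrative sketch, not part of the vendored swscale.h: probing format support
 * with the predicates declared above and picking one of the SWS_* scaler flags.
 * AV_PIX_FMT_* values come from libavutil/pixfmt.h, which this header includes.
 */
#include "libswscale/swscale.h"

/* returns a usable flag set, or 0 if this libswscale build cannot do the job */
static int pick_scaler_flags(enum AVPixelFormat src, enum AVPixelFormat dst)
{
    if (!sws_isSupportedInput(src) || !sws_isSupportedOutput(dst))
        return 0;
    return SWS_BILINEAR;   /* a common quality/speed compromise among the flags above */
}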
This must be filled and passed to + * sws_init_context(). For filling see AVOptions, options.c and + * sws_setColorspaceDetails(). + */ +struct SwsContext *sws_alloc_context(void); + +/** + * Initialize the swscaler context sws_context. + * + * @return zero or positive value on success, a negative value on + * error + */ +int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); + +/** + * Free the swscaler context swsContext. + * If swsContext is NULL, then does nothing. + */ +void sws_freeContext(struct SwsContext *swsContext); + +#if FF_API_SWS_GETCONTEXT +/** + * Allocate and return an SwsContext. You need it to perform + * scaling/conversion operations using sws_scale(). + * + * @param srcW the width of the source image + * @param srcH the height of the source image + * @param srcFormat the source image format + * @param dstW the width of the destination image + * @param dstH the height of the destination image + * @param dstFormat the destination image format + * @param flags specify which algorithm and options to use for rescaling + * @return a pointer to an allocated context, or NULL in case of error + * @note this function is to be removed after a saner alternative is + * written + * @deprecated Use sws_getCachedContext() instead. + */ +struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); +#endif + +/** + * Scale the image slice in srcSlice and put the resulting scaled + * slice in the image in dst. A slice is a sequence of consecutive + * rows in an image. + * + * Slices have to be provided in sequential order, either in + * top-bottom or bottom-top order. If slices are provided in + * non-sequential order the behavior of the function is undefined. 
+ * + * @param c the scaling context previously created with + * sws_getContext() + * @param srcSlice the array containing the pointers to the planes of + * the source slice + * @param srcStride the array containing the strides for each plane of + * the source image + * @param srcSliceY the position in the source image of the slice to + * process, that is the number (counted starting from + * zero) in the image of the first row of the slice + * @param srcSliceH the height of the source slice, that is the number + * of rows in the slice + * @param dst the array containing the pointers to the planes of + * the destination image + * @param dstStride the array containing the strides for each plane of + * the destination image + * @return the height of the output slice + */ +int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], + const int srcStride[], int srcSliceY, int srcSliceH, + uint8_t *const dst[], const int dstStride[]); + +/** + * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) + * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) + * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] + * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] + * @param brightness 16.16 fixed point brightness correction + * @param contrast 16.16 fixed point contrast correction + * @param saturation 16.16 fixed point saturation correction + * @return -1 if not supported + */ +int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], + int srcRange, const int table[4], int dstRange, + int brightness, int contrast, int saturation); + +/** + * @return -1 if not supported + */ +int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, + int *srcRange, int **table, int *dstRange, + int *brightness, int *contrast, int *saturation); + +/** + * Allocate and return an uninitialized vector with length coefficients. + */ +SwsVector *sws_allocVec(int length); + +/** + * Return a normalized Gaussian curve used to filter stuff + * quality = 3 is high quality, lower is lower quality. + */ +SwsVector *sws_getGaussianVec(double variance, double quality); + +/** + * Allocate and return a vector with length coefficients, all + * with the same value c. + */ +SwsVector *sws_getConstVec(double c, int length); + +/** + * Allocate and return a vector with just one coefficient, with + * value 1.0. + */ +SwsVector *sws_getIdentityVec(void); + +/** + * Scale all the coefficients of a by the scalar value. + */ +void sws_scaleVec(SwsVector *a, double scalar); + +/** + * Scale all the coefficients of a so that their sum equals height. + */ +void sws_normalizeVec(SwsVector *a, double height); +void sws_convVec(SwsVector *a, SwsVector *b); +void sws_addVec(SwsVector *a, SwsVector *b); +void sws_subVec(SwsVector *a, SwsVector *b); +void sws_shiftVec(SwsVector *a, int shift); + +/** + * Allocate and return a clone of the vector a, that is a vector + * with the same coefficients as a. + */ +SwsVector *sws_cloneVec(SwsVector *a); + +/** + * Print with av_log() a textual representation of the vector a + * if log_level <= av_log_level. 
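/*
 * Illustrative sketch, not part of the vendored swscale.h: the classic one-frame
 * conversion using sws_getContext(), sws_scale() and sws_freeContext() declared
 * above (the header itself recommends sws_getCachedContext() as the longer-term
 * replacement for sws_getContext()).  Plane pointers and strides are assumed to
 * be prepared by the caller; AV_PIX_FMT_* come from libavutil/pixfmt.h.
 */
#include <stdint.h>
#include "libswscale/swscale.h"

static int yuv420p_to_rgb24(int w, int h,
                            const uint8_t *const src[], const int src_stride[],
                            uint8_t *const dst[], const int dst_stride[])
{
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                            w, h, AV_PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return -1;

    /* the whole image as a single slice: rows [0, h) */
    int out_h = sws_scale(sws, src, src_stride, 0, h, dst, dst_stride);

    sws_freeContext(sws);
    return out_h;   /* height of the produced slice, normally h */
}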
+ */ +void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); + +void sws_freeVec(SwsVector *a); + +SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, + float lumaSharpen, float chromaSharpen, + float chromaHShift, float chromaVShift, + int verbose); +void sws_freeFilter(SwsFilter *filter); + +/** + * Check if context can be reused, otherwise reallocate a new one. + * + * If context is NULL, just calls sws_getContext() to get a new + * context. Otherwise, checks if the parameters are the ones already + * saved in context. If that is the case, returns the current + * context. Otherwise, frees context and gets a new context with + * the new parameters. + * + * Be warned that srcFilter and dstFilter are not checked, they + * are assumed to remain the same. + */ +struct SwsContext *sws_getCachedContext(struct SwsContext *context, + int srcW, int srcH, enum AVPixelFormat srcFormat, + int dstW, int dstH, enum AVPixelFormat dstFormat, + int flags, SwsFilter *srcFilter, + SwsFilter *dstFilter, const double *param); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. + * + * The output frame will have the same packed format as the palette. + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. + * + * With the palette format "ABCD", the destination frame ends up with the format "ABC". + * + * @param src source frame buffer + * @param dst destination frame buffer + * @param num_pixels number of pixels to convert + * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src + */ +void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); + +/** + * Get the AVClass for swsContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *sws_get_class(void); + +/** + * @} + */ + +#endif /* SWSCALE_SWSCALE_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/version.h b/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/version.h new file mode 100644 index 0000000..360ec82 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/include/libswscale/version.h @@ -0,0 +1,59 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
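/*
 * Illustrative sketch, not part of the vendored swscale.h: reusing a scaler inside
 * a decode loop with sws_getCachedContext() declared above, so the context is only
 * reallocated when the source geometry or format actually changes.  The static
 * context and the fixed YUV420P output are choices made for this example only.
 */
#include <stdint.h>
#include "libswscale/swscale.h"

static struct SwsContext *cached_ctx;   /* reused across calls */

static int rescale_frame(int src_w, int src_h, enum AVPixelFormat src_fmt,
                         const uint8_t *const src[], const int src_stride[],
                         int dst_w, int dst_h,
                         uint8_t *const dst[], const int dst_stride[])
{
    cached_ctx = sws_getCachedContext(cached_ctx,
                                      src_w, src_h, src_fmt,
                                      dst_w, dst_h, AV_PIX_FMT_YUV420P,
                                      SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (!cached_ctx)
        return -1;
    return sws_scale(cached_ctx, src, src_stride, 0, src_h, dst, dst_stride);
}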
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWSCALE_VERSION_H +#define SWSCALE_VERSION_H + +/** + * @file + * swscale version macros + */ + +#include "libavutil/avutil.h" + +#define LIBSWSCALE_VERSION_MAJOR 2 +#define LIBSWSCALE_VERSION_MINOR 5 +#define LIBSWSCALE_VERSION_MICRO 101 + +#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ + LIBSWSCALE_VERSION_MINOR, \ + LIBSWSCALE_VERSION_MICRO) +#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT + +#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + */ + +#ifndef FF_API_SWS_GETCONTEXT +#define FF_API_SWS_GETCONTEXT (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_CPU_CAPS +#define FF_API_SWS_CPU_CAPS (LIBSWSCALE_VERSION_MAJOR < 3) +#endif +#ifndef FF_API_SWS_FORMAT_NAME +#define FF_API_SWS_FORMAT_NAME (LIBSWSCALE_VERSION_MAJOR < 3) +#endif + +#endif /* SWSCALE_VERSION_H */ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavcodec.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavcodec.a new file mode 100644 index 0000000..e589b50 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavcodec.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavdevice.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavdevice.a new file mode 100644 index 0000000..c6372ff Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavdevice.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavfilter.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavfilter.a new file mode 100644 index 0000000..318f165 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavfilter.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavformat.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavformat.a new file mode 100644 index 0000000..9fe9b3d Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavformat.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavresample.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavresample.a new file mode 100644 index 0000000..5d05ef5 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libavutil.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libavutil.a new file mode 100644 index 0000000..8fce1c4 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libavutil.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libswresample.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libswresample.a new file mode 100644 index 0000000..ee12ce0 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libswresample.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/libswscale.a b/Limelight-iOS/libs/ffmpeg/i386/lib/libswscale.a new file mode 100644 index 0000000..73e6cd8 Binary files /dev/null and b/Limelight-iOS/libs/ffmpeg/i386/lib/libswscale.a differ diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavcodec.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavcodec.pc new file mode 100644 index 0000000..293bc7f --- /dev/null +++ 
b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavcodec.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavcodec +Description: FFmpeg codec library +Version: 55.39.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavcodec -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavdevice.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavdevice.pc new file mode 100644 index 0000000..a43a171 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavdevice.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavdevice +Description: FFmpeg device handling library +Version: 55.5.100 +Requires: libavfilter = 3.90.100, libavformat = 55.19.104 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavdevice -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavfilter.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavfilter.pc new file mode 100644 index 0000000..a0e02e2 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavfilter.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavfilter +Description: FFmpeg audio/video filtering library +Version: 3.90.100 +Requires: libswresample = 0.17.104, libswscale = 2.5.101, libavresample = 1.1.0, libavformat = 55.19.104, libavcodec = 55.39.101, libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavfilter -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavformat.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavformat.pc new file mode 100644 index 0000000..a481952 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavformat.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavformat +Description: FFmpeg container format library +Version: 55.19.104 +Requires: libavcodec = 55.39.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavformat -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavresample.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavresample.pc new file mode 100644 index 0000000..e6dd6f6 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavresample +Description: Libav audio resampling library +Version: 1.1.0 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lavresample -lm -lz +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavutil.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavutil.pc new file mode 100644 index 0000000..c123a35 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libavutil.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 
+exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libavutil +Description: FFmpeg utility library +Version: 52.48.101 +Requires: +Requires.private: +Conflicts: +Libs: -L${libdir} -lavutil -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswresample.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswresample.pc new file mode 100644 index 0000000..db1e0b9 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswresample.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswresample +Description: FFmpeg audio resampling library +Version: 0.17.104 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswresample -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswscale.pc b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswscale.pc new file mode 100644 index 0000000..bfb3f15 --- /dev/null +++ b/Limelight-iOS/libs/ffmpeg/i386/lib/pkgconfig/libswscale.pc @@ -0,0 +1,14 @@ +prefix=/Users/diegowaxemberg/Downloads/build-ffmpeg/build/built/i386 +exec_prefix=${prefix} +libdir=${prefix}/lib +includedir=${prefix}/include + +Name: libswscale +Description: FFmpeg image rescaling library +Version: 2.5.101 +Requires: libavutil = 52.48.101 +Requires.private: +Conflicts: +Libs: -L${libdir} -lswscale -lm +Libs.private: +Cflags: -I${includedir} diff --git a/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus.h b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus.h new file mode 100644 index 0000000..ce86038 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus.h @@ -0,0 +1,906 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus.h + * @brief Opus reference implementation API + */ + +#ifndef OPUS_H +#define OPUS_H + +#include "opus_types.h" +#include "opus_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @mainpage Opus + * + * The Opus codec is designed for interactive speech and audio transmission over the Internet. 
+ * It is designed by the IETF Codec Working Group and incorporates technology from + * Skype's SILK codec and Xiph.Org's CELT codec. + * + * The Opus codec is designed to handle a wide range of interactive audio applications, + * including Voice over IP, videoconferencing, in-game chat, and even remote live music + * performances. It can scale from low bit-rate narrowband speech to very high quality + * stereo music. Its main features are: + + * @li Sampling rates from 8 to 48 kHz + * @li Bit-rates from 6 kb/s to 510 kb/s + * @li Support for both constant bit-rate (CBR) and variable bit-rate (VBR) + * @li Audio bandwidth from narrowband to full-band + * @li Support for speech and music + * @li Support for mono and stereo + * @li Support for multichannel (up to 255 channels) + * @li Frame sizes from 2.5 ms to 60 ms + * @li Good loss robustness and packet loss concealment (PLC) + * @li Floating point and fixed-point implementation + * + * Documentation sections: + * @li @ref opus_encoder + * @li @ref opus_decoder + * @li @ref opus_repacketizer + * @li @ref opus_multistream + * @li @ref opus_libinfo + * @li @ref opus_custom + */ + +/** @defgroup opus_encoder Opus Encoder + * @{ + * + * @brief This page describes the process and functions used to encode Opus. + * + * Since Opus is a stateful codec, the encoding process starts with creating an encoder + * state. This can be done with: + * + * @code + * int error; + * OpusEncoder *enc; + * enc = opus_encoder_create(Fs, channels, application, &error); + * @endcode + * + * From this point, @c enc can be used for encoding an audio stream. An encoder state + * @b must @b not be used for more than one stream at the same time. Similarly, the encoder + * state @b must @b not be re-initialized for each frame. + * + * While opus_encoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * + * @code + * int size; + * int error; + * OpusEncoder *enc; + * size = opus_encoder_get_size(channels); + * enc = malloc(size); + * error = opus_encoder_init(enc, Fs, channels, application); + * @endcode + * + * where opus_encoder_get_size() returns the required size for the encoder state. Note that + * future versions of this code may change the size, so no assumptions should be made about it. + * + * The encoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. memcpy()) + * + * It is possible to change some of the encoder's settings using the opus_encoder_ctl() + * interface. All these settings already default to the recommended value, so they should + * only be changed when necessary. The most common settings one may want to change are: + * + * @code + * opus_encoder_ctl(enc, OPUS_SET_BITRATE(bitrate)); + * opus_encoder_ctl(enc, OPUS_SET_COMPLEXITY(complexity)); + * opus_encoder_ctl(enc, OPUS_SET_SIGNAL(signal_type)); + * @endcode + * + * where + * + * @arg bitrate is in bits per second (b/s) + * @arg complexity is a value from 1 to 10, where 1 is the lowest complexity and 10 is the highest + * @arg signal_type is either OPUS_AUTO (default), OPUS_SIGNAL_VOICE, or OPUS_SIGNAL_MUSIC + * + * See @ref opus_encoderctls and @ref opus_genericctls for a complete list of parameters that can be set or queried. Most parameters can be set or changed at any time during a stream.
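[Editor's note, complementing the snippets above and the opus_encode() walkthrough that follows: a minimal, illustrative 48 kHz stereo encode loop that puts create/ctl/encode/destroy together. This is a hedged sketch, not part of the header or this repository; read_pcm() and send_packet() are hypothetical application hooks.]

#include <opus/opus.h>

/* Hypothetical application hooks, assumed only for this sketch. */
extern int  read_pcm(opus_int16 *pcm, int frame_size);            /* returns 0 at end of input */
extern void send_packet(const unsigned char *data, opus_int32 len);

#define FRAME_SIZE 960    /* 20 ms at 48 kHz */
#define MAX_PACKET 4000   /* recommended packet buffer size */

static int encode_stream(void)
{
    int err;
    OpusEncoder *enc = opus_encoder_create(48000, 2, OPUS_APPLICATION_AUDIO, &err);
    if (err != OPUS_OK)
        return err;

    opus_encoder_ctl(enc, OPUS_SET_BITRATE(64000));

    opus_int16    pcm[FRAME_SIZE * 2];     /* interleaved stereo input */
    unsigned char packet[MAX_PACKET];
    while (read_pcm(pcm, FRAME_SIZE)) {
        opus_int32 len = opus_encode(enc, pcm, FRAME_SIZE, packet, MAX_PACKET);
        if (len < 0) {                     /* negative values are error codes */
            opus_encoder_destroy(enc);
            return (int)len;
        }
        send_packet(packet, len);
    }
    opus_encoder_destroy(enc);
    return OPUS_OK;
}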
+ * + * To encode a frame, opus_encode() or opus_encode_float() must be called with exactly one frame (2.5, 5, 10, 20, 40 or 60 ms) of audio data: + * @code + * len = opus_encode(enc, audio_frame, frame_size, packet, max_packet); + * @endcode + * + * where + *
+ * @li audio_frame is the audio data in opus_int16 (or float for opus_encode_float())
+ * @li frame_size is the duration of the frame in samples (per channel)
+ * @li packet is the byte array to which the compressed data is written
+ * @li max_packet is the maximum number of bytes that can be written in the packet (4000 bytes is recommended).
+ *     Do not use max_packet to control VBR target bitrate, instead use the #OPUS_SET_BITRATE CTL.
+ * + * opus_encode() and opus_encode_float() return the number of bytes actually written to the packet. + * The return value can be negative, which indicates that an error has occurred. If the return value + * is 1 byte, then the packet does not need to be transmitted (DTX). + * + * Once the encoder state is no longer needed, it can be destroyed with + * + * @code + * opus_encoder_destroy(enc); + * @endcode + * + * If the encoder was created with opus_encoder_init() rather than opus_encoder_create(), + * then no action is required aside from potentially freeing the memory that was manually + * allocated for it (calling free(enc) for the example above) + * + */ + +/** Opus encoder state. + * This contains the complete state of an Opus encoder. + * It is position independent and can be freely copied. + * @see opus_encoder_create,opus_encoder_init + */ +typedef struct OpusEncoder OpusEncoder; + +/** Gets the size of an OpusEncoder structure. + * @param[in] channels int: Number of channels. + * This must be 1 or 2. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_encoder_get_size(int channels); + +/** + */ + +/** Allocates and initializes an encoder state. + * There are three coding modes: + * + * @ref OPUS_APPLICATION_VOIP gives best quality at a given bitrate for voice + * signals. It enhances the input signal by high-pass filtering and + * emphasizing formants and harmonics. Optionally it includes in-band + * forward error correction to protect against packet loss. Use this + * mode for typical VoIP applications. Because of the enhancement, + * even at high bitrates the output may sound different from the input. + * + * @ref OPUS_APPLICATION_AUDIO gives best quality at a given bitrate for most + * non-voice signals like music. Use this mode for music and mixed + * (music/voice) content, broadcast, and applications requiring less + * than 15 ms of coding delay. + * + * @ref OPUS_APPLICATION_RESTRICTED_LOWDELAY configures low-delay mode that + * disables the speech-optimized mode in exchange for slightly reduced delay. + * This mode can only be set on a newly initialized or freshly reset encoder + * because it changes the codec delay. + * + * This is useful when the caller knows that the speech-optimized modes will not be needed (use with caution). + * @param [in] Fs opus_int32: Sampling rate of input signal (Hz) + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) in input signal + * @param [in] application int: Coding mode (@ref OPUS_APPLICATION_VOIP/@ref OPUS_APPLICATION_AUDIO/@ref OPUS_APPLICATION_RESTRICTED_LOWDELAY) + * @param [out] error int*: @ref opus_errorcodes + * @note Regardless of the sampling rate and number of channels selected, the Opus encoder + * can switch to a lower audio bandwidth or number of channels if the bitrate + * selected is too low. This also means that it is safe to always use 48 kHz stereo input + * and let the encoder optimize the encoding. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusEncoder *opus_encoder_create( + opus_int32 Fs, + int channels, + int application, + int *error +); + +/** Initializes a previously allocated encoder state + * The memory pointed to by st must be at least the size returned by opus_encoder_get_size(). + * This is intended for applications which use their own allocator instead of malloc. + * @see opus_encoder_create(),opus_encoder_get_size() + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL.
+ * @param [in] st OpusEncoder*: Encoder state + * @param [in] Fs opus_int32: Sampling rate of input signal (Hz) + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) in input signal + * @param [in] application int: Coding mode (OPUS_APPLICATION_VOIP/OPUS_APPLICATION_AUDIO/OPUS_APPLICATION_RESTRICTED_LOWDELAY) + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_encoder_init( + OpusEncoder *st, + opus_int32 Fs, + int channels, + int application +) OPUS_ARG_NONNULL(1); + +/** Encodes an Opus frame. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm opus_int16*: Input signal (interleaved if 2 channels). length is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode( + OpusEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes an Opus frame from floating point input. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm float*: Input in float format (interleaved if 2 channels), with a normal range of +/-1.0. + * Samples with a range beyond +/-1.0 are supported but will + * be clipped by decoders using the integer API and should + * only be used if it is known that the far end supports + * extended dynamic range. + * length is frame_size*channels*sizeof(float) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode_float( + OpusEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusEncoder allocated by opus_encoder_create(). + * @param[in] st OpusEncoder*: State to be freed. + */ +OPUS_EXPORT void opus_encoder_destroy(OpusEncoder *st); + +/** Perform a CTL function on an Opus encoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusEncoder*: Encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_encoderctls. + * @see opus_genericctls + * @see opus_encoderctls + */ +OPUS_EXPORT int opus_encoder_ctl(OpusEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); +/**@}*/ + +/** @defgroup opus_decoder Opus Decoder + * @{ + * + * @brief This page describes the process and functions used to decode Opus. + * + * The decoding process also starts with creating a decoder + * state. This can be done with: + * @code + * int error; + * OpusDecoder *dec; + * dec = opus_decoder_create(Fs, channels, &error); + * @endcode + * where + * @li Fs is the sampling rate and must be 8000, 12000, 16000, 24000, or 48000 + * @li channels is the number of channels (1 or 2) + * @li error will hold the error code in case of failure (or #OPUS_OK on success) + * @li the return value is a newly created decoder state to be used for decoding + * + * While opus_decoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * @code + * int size; + * int error; + * OpusDecoder *dec; + * size = opus_decoder_get_size(channels); + * dec = malloc(size); + * error = opus_decoder_init(dec, Fs, channels); + * @endcode + * where opus_decoder_get_size() returns the required size for the decoder state. Note that + * future versions of this code may change the size, so no assumptions should be made about it. + * + * The decoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. memcpy()) + * + * To decode a frame, opus_decode() or opus_decode_float() must be called with a packet of compressed audio data: + * @code + * frame_size = opus_decode(dec, packet, len, decoded, max_size, 0); + * @endcode + * where + * + * @li packet is the byte array containing the compressed data + * @li len is the exact number of bytes contained in the packet + * @li decoded is the decoded audio data in opus_int16 (or float for opus_decode_float()) + * @li max_size is the max duration of the frame in samples (per channel) that can fit into the decoded_frame array + * + * opus_decode() and opus_decode_float() return the number of samples (per channel) decoded from the packet. + * If that value is negative, then an error has occurred. This can occur if the packet is corrupted or if the audio + * buffer is too small to hold the decoded audio. + * + * Opus is a stateful codec with overlapping blocks and as a result Opus + * packets are not coded independently of each other. Packets must be + * passed into the decoder serially and in the correct order for a correct + * decode. Lost packets can be replaced with loss concealment by calling + * the decoder with a null pointer and zero length for the missing packet.
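[Editor's note: a compact, illustrative counterpart to the decoder text above, including the null-pointer loss-concealment call. This is a hedged sketch rather than project code; recv_packet() and play_pcm() are hypothetical hooks, and 20 ms frames are assumed for concealment until the first packet has been decoded.]

#include <opus/opus.h>

/* Hypothetical hooks: recv_packet() returns the packet length, 0 for a lost
 * packet (and sets *data to NULL), or a negative value at end of stream. */
extern opus_int32 recv_packet(const unsigned char **data);
extern void play_pcm(const opus_int16 *pcm, int samples);

#define MAX_FRAME_SIZE 5760   /* 120 ms at 48 kHz */

static int decode_stream(void)
{
    int err;
    OpusDecoder *dec = opus_decoder_create(48000, 2, &err);
    if (err != OPUS_OK)
        return err;

    opus_int16 pcm[MAX_FRAME_SIZE * 2];
    int last_samples = 960;               /* assume 20 ms frames for PLC at first */
    for (;;) {
        const unsigned char *data;
        opus_int32 len = recv_packet(&data);
        if (len < 0)
            break;                        /* end of stream */
        /* A lost packet is concealed by passing NULL data together with the
         * duration of the audio that is missing. */
        int samples = opus_decode(dec, len ? data : NULL, len, pcm,
                                  len ? MAX_FRAME_SIZE : last_samples, 0);
        if (samples < 0) {
            opus_decoder_destroy(dec);
            return samples;
        }
        last_samples = samples;
        play_pcm(pcm, samples);
    }
    opus_decoder_destroy(dec);
    return OPUS_OK;
}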
+ * + * A single codec state may only be accessed from a single thread at + * a time and any required locking must be performed by the caller. Separate + * streams must be decoded with separate decoder states and can be decoded + * in parallel unless the library was compiled with NONTHREADSAFE_PSEUDOSTACK + * defined. + * + */ + +/** Opus decoder state. + * This contains the complete state of an Opus decoder. + * It is position independent and can be freely copied. + * @see opus_decoder_create,opus_decoder_init + */ +typedef struct OpusDecoder OpusDecoder; + +/** Gets the size of an OpusDecoder structure. + * @param [in] channels int: Number of channels. + * This must be 1 or 2. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_size(int channels); + +/** Allocates and initializes a decoder state. + * @param [in] Fs opus_int32: Sample rate to decode at (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @param [out] error int*: #OPUS_OK Success or @ref opus_errorcodes + * + * Internally Opus stores data at 48000 Hz, so that should be the default + * value for Fs. However, the decoder can efficiently decode to buffers + * at 8, 12, 16, and 24 kHz so if for some reason the caller cannot use + * data at the full sample rate, or knows the compressed data doesn't + * use the full frequency range, it can request decoding at a reduced + * rate. Likewise, the decoder is capable of filling in either mono or + * interleaved stereo pcm buffers, at the caller's request. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusDecoder *opus_decoder_create( + opus_int32 Fs, + int channels, + int *error +); + +/** Initializes a previously allocated decoder state. + * The state must be at least the size returned by opus_decoder_get_size(). + * This is intended for applications which use their own allocator instead of malloc. @see opus_decoder_create,opus_decoder_get_size + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @param [in] st OpusDecoder*: Decoder state. + * @param [in] Fs opus_int32: Sampling rate to decode to (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_decoder_init( + OpusDecoder *st, + opus_int32 Fs, + int channels +) OPUS_ARG_NONNULL(1); + +/** Decode an Opus packet. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload* + * @param [out] pcm opus_int16*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. 
If no such data is available, the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode an Opus packet with floating point output. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload + * @param [out] pcm float*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(float) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. If no such data is available the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode_float( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on an Opus decoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusDecoder*: Decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_decoderctls. + * @see opus_genericctls + * @see opus_decoderctls + */ +OPUS_EXPORT int opus_decoder_ctl(OpusDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusDecoder allocated by opus_decoder_create(). + * @param[in] st OpusDecoder*: State to be freed. + */ +OPUS_EXPORT void opus_decoder_destroy(OpusDecoder *st); + +/** Parse an opus packet into one or more frames. + * Opus_decode will perform this operation internally so most applications do + * not need to use this function. + * This function does not copy the frames, the returned pointers are pointers into + * the input packet. + * @param [in] data char*: Opus packet to be parsed + * @param [in] len opus_int32: size of data + * @param [out] out_toc char*: TOC pointer + * @param [out] frames char*[48] encapsulated frames + * @param [out] size opus_int16[48] sizes of the encapsulated frames + * @param [out] payload_offset int*: returns the position of the payload within the packet (in bytes) + * @returns number of frames + */ +OPUS_EXPORT int opus_packet_parse( + const unsigned char *data, + opus_int32 len, + unsigned char *out_toc, + const unsigned char *frames[48], + opus_int16 size[48], + int *payload_offset +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Gets the bandwidth of an Opus packet. 
+ * @param [in] data char*: Opus packet + * @retval OPUS_BANDWIDTH_NARROWBAND Narrowband (4kHz bandpass) + * @retval OPUS_BANDWIDTH_MEDIUMBAND Mediumband (6kHz bandpass) + * @retval OPUS_BANDWIDTH_WIDEBAND Wideband (8kHz bandpass) + * @retval OPUS_BANDWIDTH_SUPERWIDEBAND Superwideband (12kHz bandpass) + * @retval OPUS_BANDWIDTH_FULLBAND Fullband (20kHz bandpass) + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_bandwidth(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples per frame from an Opus packet. + * @param [in] data char*: Opus packet. + * This must contain at least one byte of + * data. + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples per frame. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_samples_per_frame(const unsigned char *data, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of channels from an Opus packet. + * @param [in] data char*: Opus packet + * @returns Number of channels + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_channels(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of frames in an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of frames + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_frames(const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_samples(const unsigned char packet[], opus_int32 len, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] dec OpusDecoder*: Decoder state + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_nb_samples(const OpusDecoder *dec, const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); +/**@}*/ + +/** @defgroup opus_repacketizer Repacketizer + * @{ + * + * The repacketizer can be used to merge multiple Opus packets into a single + * packet or alternatively to split Opus packets that have previously been + * merged. 
Splitting valid Opus packets is always guaranteed to succeed, + * whereas merging valid packets only succeeds if all frames have the same + * mode, bandwidth, and frame size, and when the total duration of the merged + * packet is no more than 120 ms. + * The repacketizer currently only operates on elementary Opus + * streams. It will not manipulate multistream packets successfully, except in + * the degenerate case where they consist of data from a single stream. + * + * The repacketizing process starts with creating a repacketizer state, either + * by calling opus_repacketizer_create() or by allocating the memory yourself, + * e.g., + * @code + * OpusRepacketizer *rp; + * rp = (OpusRepacketizer*)malloc(opus_repacketizer_get_size()); + * if (rp != NULL) + * opus_repacketizer_init(rp); + * @endcode + * + * Then the application should submit packets with opus_repacketizer_cat(), + * extract new packets with opus_repacketizer_out() or + * opus_repacketizer_out_range(), and then reset the state for the next set of + * input packets via opus_repacketizer_init(). + * + * For example, to split a sequence of packets into individual frames: + * @code + * unsigned char *data; + * int len; + * while (get_next_packet(&data, &len)) + * { + * unsigned char out[1276]; + * opus_int32 out_len; + * int nb_frames; + * int err; + * int i; + * err = opus_repacketizer_cat(rp, data, len); + * if (err != OPUS_OK) + * { + * release_packet(data); + * return err; + * } + * nb_frames = opus_repacketizer_get_nb_frames(rp); + * for (i = 0; i < nb_frames; i++) + * { + * out_len = opus_repacketizer_out_range(rp, i, i+1, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packet(data); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * } + * opus_repacketizer_init(rp); + * release_packet(data); + * } + * @endcode + * + * Alternatively, to combine a sequence of frames into packets that each + * contain up to TARGET_DURATION_MS milliseconds of data: + * @code + * // The maximum number of packets with duration TARGET_DURATION_MS occurs + * // when the frame size is 2.5 ms, for a total of (TARGET_DURATION_MS*2/5) + * // packets. + * unsigned char *data[(TARGET_DURATION_MS*2/5)+1]; + * opus_int32 len[(TARGET_DURATION_MS*2/5)+1]; + * int nb_packets; + * unsigned char out[1277*(TARGET_DURATION_MS*2/2)]; + * opus_int32 out_len; + * int prev_toc; + * nb_packets = 0; + * while (get_next_packet(data+nb_packets, len+nb_packets)) + * { + * int nb_frames; + * int err; + * nb_frames = opus_packet_get_nb_frames(data[nb_packets], len[nb_packets]); + * if (nb_frames < 1) + * { + * release_packets(data, nb_packets+1); + * return nb_frames; + * } + * nb_frames += opus_repacketizer_get_nb_frames(rp); + * // If adding the next packet would exceed our target, or it has an + * // incompatible TOC sequence, output the packets we already have before + * // submitting it. + * // N.B., The nb_packets > 0 check ensures we've submitted at least one + * // packet since the last call to opus_repacketizer_init(). Otherwise a + * // single packet longer than TARGET_DURATION_MS would cause us to try to + * // output an (invalid) empty packet. It also ensures that prev_toc has + * // been set to a valid value. Additionally, len[nb_packets] > 0 is + * // guaranteed by the call to opus_packet_get_nb_frames() above, so the + * // reference to data[nb_packets][0] should be valid.
+ * if (nb_packets > 0 && ( + * ((prev_toc & 0xFC) != (data[nb_packets][0] & 0xFC)) || + * opus_packet_get_samples_per_frame(data[nb_packets], 48000)*nb_frames > + * TARGET_DURATION_MS*48)) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packets(data, nb_packets+1); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * opus_repacketizer_init(rp); + * release_packets(data, nb_packets); + * data[0] = data[nb_packets]; + * len[0] = len[nb_packets]; + * nb_packets = 0; + * } + * err = opus_repacketizer_cat(rp, data[nb_packets], len[nb_packets]); + * if (err != OPUS_OK) + * { + * release_packets(data, nb_packets+1); + * return err; + * } + * prev_toc = data[nb_packets][0]; + * nb_packets++; + * } + * // Output the final, partial packet. + * if (nb_packets > 0) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * release_packets(data, nb_packets); + * if (out_len < 0) + * return (int)out_len; + * output_next_packet(out, out_len); + * } + * @endcode + * + * An alternate way of merging packets is to simply call opus_repacketizer_cat() + * unconditionally until it fails. At that point, the merged packet can be + * obtained with opus_repacketizer_out() and the input packet for which + * opus_repacketizer_cat() needs to be re-added to a newly reinitialized + * repacketizer state. + */ + +typedef struct OpusRepacketizer OpusRepacketizer; + +/** Gets the size of an OpusRepacketizer structure. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_size(void); + +/** (Re)initializes a previously allocated repacketizer state. + * The state must be at least the size returned by opus_repacketizer_get_size(). + * This can be used for applications which use their own allocator instead of + * malloc(). + * It must also be called to reset the queue of packets waiting to be + * repacketized, which is necessary if the maximum packet duration of 120 ms + * is reached or if you wish to submit packets with a different Opus + * configuration (coding mode, audio bandwidth, frame size, or channel count). + * Failure to do so will prevent a new packet from being added with + * opus_repacketizer_cat(). + * @see opus_repacketizer_create + * @see opus_repacketizer_get_size + * @see opus_repacketizer_cat + * @param rp OpusRepacketizer*: The repacketizer state to + * (re)initialize. + * @returns A pointer to the same repacketizer state that was passed in. + */ +OPUS_EXPORT OpusRepacketizer *opus_repacketizer_init(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Allocates memory and initializes the new repacketizer with + * opus_repacketizer_init(). + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusRepacketizer *opus_repacketizer_create(void); + +/** Frees an OpusRepacketizer allocated by + * opus_repacketizer_create(). + * @param[in] rp OpusRepacketizer*: State to be freed. + */ +OPUS_EXPORT void opus_repacketizer_destroy(OpusRepacketizer *rp); + +/** Add a packet to the current repacketizer state. + * This packet must match the configuration of any packets already submitted + * for repacketization since the last call to opus_repacketizer_init(). + * This means that it must have the same coding mode, audio bandwidth, frame + * size, and channel count. + * This can be checked in advance by examining the top 6 bits of the first + * byte of the packet, and ensuring they match the top 6 bits of the first + * byte of any previously submitted packet. 
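[Editor's note: the "call opus_repacketizer_cat() unconditionally until it fails" merging strategy described earlier in this section has no example of its own, so here is a hedged sketch of it. next_packet() and emit_packet() are hypothetical hooks, and each submitted buffer is assumed to remain valid until the repacketizer state is re-initialized, as the cat() documentation requires.]

#include <opus/opus.h>

/* Hypothetical hooks; buffers from next_packet() must stay valid until the
 * repacketizer state is re-initialized or destroyed. */
extern int  next_packet(const unsigned char **data, opus_int32 *len);
extern void emit_packet(const unsigned char *data, opus_int32 len);

static int merge_packets(void)
{
    static unsigned char out[1277 * 48];   /* 1277 bytes per frame, at most 48 frames per packet */
    OpusRepacketizer *rp = opus_repacketizer_create();
    if (rp == NULL)
        return OPUS_ALLOC_FAIL;

    const unsigned char *data;
    opus_int32 len, out_len;
    while (next_packet(&data, &len)) {
        if (opus_repacketizer_cat(rp, data, len) != OPUS_OK) {
            /* The packet did not fit: flush the merged packet, reset, re-add. */
            out_len = opus_repacketizer_out(rp, out, sizeof(out));
            if (out_len > 0)
                emit_packet(out, out_len);
            opus_repacketizer_init(rp);
            if (opus_repacketizer_cat(rp, data, len) != OPUS_OK) {
                opus_repacketizer_destroy(rp);
                return OPUS_INVALID_PACKET; /* rejected even on its own */
            }
        }
    }
    /* Flush any remaining frames. */
    out_len = opus_repacketizer_out(rp, out, sizeof(out));
    if (out_len > 0)
        emit_packet(out, out_len);
    opus_repacketizer_destroy(rp);
    return OPUS_OK;
}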
+ * The total duration of audio in the repacketizer state also must not exceed + * 120 ms, the maximum duration of a single packet, after adding this packet. + * + * The contents of the current repacketizer state can be extracted into new + * packets using opus_repacketizer_out() or opus_repacketizer_out_range(). + * + * In order to add a packet with a different configuration or to add more + * audio beyond 120 ms, you must clear the repacketizer state by calling + * opus_repacketizer_init(). + * If a packet is too large to add to the current repacketizer state, no part + * of it is added, even if it contains multiple frames, some of which might + * fit. + * If you wish to be able to add parts of such packets, you should first use + * another repacketizer to split the packet into pieces and add them + * individually. + * @see opus_repacketizer_out_range + * @see opus_repacketizer_out + * @see opus_repacketizer_init + * @param rp OpusRepacketizer*: The repacketizer state to which to + * add the packet. + * @param[in] data const unsigned char*: The packet data. + * The application must ensure + * this pointer remains valid + * until the next call to + * opus_repacketizer_init() or + * opus_repacketizer_destroy(). + * @param len opus_int32: The number of bytes in the packet data. + * @returns An error code indicating whether or not the operation succeeded. + * @retval #OPUS_OK The packet's contents have been added to the repacketizer + * state. + * @retval #OPUS_INVALID_PACKET The packet did not have a valid TOC sequence, + * the packet's TOC sequence was not compatible + * with previously submitted packets (because + * the coding mode, audio bandwidth, frame size, + * or channel count did not match), or adding + * this packet would increase the total amount of + * audio stored in the repacketizer state to more + * than 120 ms. + */ +OPUS_EXPORT int opus_repacketizer_cat(OpusRepacketizer *rp, const unsigned char *data, opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); + + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. + * @param begin int: The index of the first frame in the current + * repacketizer state to include in the output. + * @param end int: One past the index of the last frame in the + * current repacketizer state to include in the + * output. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1276 for a single frame, + * or for multiple frames, + * 1277*(end-begin). + * However, 1*(end-begin) plus + * the size of all packet data submitted to + * the repacketizer since the last call to + * opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BAD_ARG [begin,end) was an invalid range of + * frames (begin < 0, begin >= end, or end > + * opus_repacketizer_get_nb_frames()). + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out_range(OpusRepacketizer *rp, int begin, int end, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Return the total number of frames contained in packet data submitted to + * the repacketizer state so far via opus_repacketizer_cat() since the last + * call to opus_repacketizer_init() or opus_repacketizer_create(). + * This defines the valid range of packets that can be extracted with + * opus_repacketizer_out_range() or opus_repacketizer_out(). + * @param rp OpusRepacketizer*: The repacketizer state containing the + * frames. + * @returns The total number of frames contained in the packet data submitted + * to the repacketizer state. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_nb_frames(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * This is a convenience routine that returns all the data submitted so far + * in a single packet. + * It is equivalent to calling + * @code + * opus_repacketizer_out_range(rp, 0, opus_repacketizer_get_nb_frames(rp), + * data, maxlen) + * @endcode + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1277*opus_repacketizer_get_nb_frames(rp). + * However, + * 1*opus_repacketizer_get_nb_frames(rp) + * plus the size of all packet data + * submitted to the repacketizer since the + * last call to opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out(OpusRepacketizer *rp, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_defines.h b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_defines.h new file mode 100644 index 0000000..9fa3ccb --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_defines.h @@ -0,0 +1,655 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus_defines.h + * @brief Opus reference implementation constants + */ + +#ifndef OPUS_DEFINES_H +#define OPUS_DEFINES_H + +#include "opus_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @defgroup opus_errorcodes Error codes + * @{ + */ +/** No error @hideinitializer*/ +#define OPUS_OK 0 +/** One or more invalid/out of range arguments @hideinitializer*/ +#define OPUS_BAD_ARG -1 +/** The mode struct passed is invalid @hideinitializer*/ +#define OPUS_BUFFER_TOO_SMALL -2 +/** An internal error was detected @hideinitializer*/ +#define OPUS_INTERNAL_ERROR -3 +/** The compressed data passed is corrupted @hideinitializer*/ +#define OPUS_INVALID_PACKET -4 +/** Invalid/unsupported request number @hideinitializer*/ +#define OPUS_UNIMPLEMENTED -5 +/** An encoder or decoder structure is invalid or already freed @hideinitializer*/ +#define OPUS_INVALID_STATE -6 +/** Memory allocation has failed @hideinitializer*/ +#define OPUS_ALLOC_FAIL -7 +/**@}*/ + +/** @cond OPUS_INTERNAL_DOC */ +/**Export control for opus functions */ + +#ifndef OPUS_EXPORT +# if defined(WIN32) +# ifdef OPUS_BUILD +# define OPUS_EXPORT __declspec(dllexport) +# else +# define OPUS_EXPORT +# endif +# elif defined(__GNUC__) && defined(OPUS_BUILD) +# define OPUS_EXPORT __attribute__ ((visibility ("default"))) +# else +# define OPUS_EXPORT +# endif +#endif + +# if !defined(OPUS_GNUC_PREREQ) +# if defined(__GNUC__)&&defined(__GNUC_MINOR__) +# define OPUS_GNUC_PREREQ(_maj,_min) \ + ((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min)) +# else +# define OPUS_GNUC_PREREQ(_maj,_min) 0 +# endif +# endif + +#if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if OPUS_GNUC_PREREQ(3,0) +# define OPUS_RESTRICT __restrict__ +# elif (defined(_MSC_VER) && _MSC_VER >= 1400) +# define OPUS_RESTRICT __restrict +# else +# define OPUS_RESTRICT +# endif +#else +# define OPUS_RESTRICT restrict +#endif + +/**Warning attributes for opus functions + * NONNULL is not used in OPUS_BUILD to avoid the compiler optimizing out + * some paranoid null checks. */ +#if defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) +#else +# define OPUS_WARN_UNUSED_RESULT +#endif +#if !defined(OPUS_BUILD) && defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x))) +#else +# define OPUS_ARG_NONNULL(_x) +#endif + +/** These are the actual Encoder CTL ID numbers. + * They should not be used directly by applications. 
+ * In general, SETs should be even and GETs should be odd.*/ +#define OPUS_SET_APPLICATION_REQUEST 4000 +#define OPUS_GET_APPLICATION_REQUEST 4001 +#define OPUS_SET_BITRATE_REQUEST 4002 +#define OPUS_GET_BITRATE_REQUEST 4003 +#define OPUS_SET_MAX_BANDWIDTH_REQUEST 4004 +#define OPUS_GET_MAX_BANDWIDTH_REQUEST 4005 +#define OPUS_SET_VBR_REQUEST 4006 +#define OPUS_GET_VBR_REQUEST 4007 +#define OPUS_SET_BANDWIDTH_REQUEST 4008 +#define OPUS_GET_BANDWIDTH_REQUEST 4009 +#define OPUS_SET_COMPLEXITY_REQUEST 4010 +#define OPUS_GET_COMPLEXITY_REQUEST 4011 +#define OPUS_SET_INBAND_FEC_REQUEST 4012 +#define OPUS_GET_INBAND_FEC_REQUEST 4013 +#define OPUS_SET_PACKET_LOSS_PERC_REQUEST 4014 +#define OPUS_GET_PACKET_LOSS_PERC_REQUEST 4015 +#define OPUS_SET_DTX_REQUEST 4016 +#define OPUS_GET_DTX_REQUEST 4017 +#define OPUS_SET_VBR_CONSTRAINT_REQUEST 4020 +#define OPUS_GET_VBR_CONSTRAINT_REQUEST 4021 +#define OPUS_SET_FORCE_CHANNELS_REQUEST 4022 +#define OPUS_GET_FORCE_CHANNELS_REQUEST 4023 +#define OPUS_SET_SIGNAL_REQUEST 4024 +#define OPUS_GET_SIGNAL_REQUEST 4025 +#define OPUS_GET_LOOKAHEAD_REQUEST 4027 +/* #define OPUS_RESET_STATE 4028 */ +#define OPUS_GET_SAMPLE_RATE_REQUEST 4029 +#define OPUS_GET_FINAL_RANGE_REQUEST 4031 +#define OPUS_GET_PITCH_REQUEST 4033 +#define OPUS_SET_GAIN_REQUEST 4034 +#define OPUS_GET_GAIN_REQUEST 4045 /* Should have been 4035 */ +#define OPUS_SET_LSB_DEPTH_REQUEST 4036 +#define OPUS_GET_LSB_DEPTH_REQUEST 4037 + +#define OPUS_GET_LAST_PACKET_DURATION_REQUEST 4039 + +/* Don't use 4045, it's already taken by OPUS_GET_GAIN_REQUEST */ + +/* Macros to trigger compilation errors when the wrong types are provided to a CTL */ +#define __opus_check_int(x) (((void)((x) == (opus_int32)0)), (opus_int32)(x)) +#define __opus_check_int_ptr(ptr) ((ptr) + ((ptr) - (opus_int32*)(ptr))) +#define __opus_check_uint_ptr(ptr) ((ptr) + ((ptr) - (opus_uint32*)(ptr))) +/** @endcond */ + +/** @defgroup opus_ctlvalues Pre-defined values for CTL interface + * @see opus_genericctls, opus_encoderctls + * @{ + */ +/* Values for the various encoder CTLs */ +#define OPUS_AUTO -1000 /**opus_int32: Allowed values: 0-10, inclusive. + * + * @hideinitializer */ +#define OPUS_SET_COMPLEXITY(x) OPUS_SET_COMPLEXITY_REQUEST, __opus_check_int(x) +/** Gets the encoder's complexity configuration. + * @see OPUS_SET_COMPLEXITY + * @param[out] x opus_int32 *: Returns a value in the range 0-10, + * inclusive. + * @hideinitializer */ +#define OPUS_GET_COMPLEXITY(x) OPUS_GET_COMPLEXITY_REQUEST, __opus_check_int_ptr(x) + +/** Configures the bitrate in the encoder. + * Rates from 500 to 512000 bits per second are meaningful, as well as the + * special values #OPUS_AUTO and #OPUS_BITRATE_MAX. + * The value #OPUS_BITRATE_MAX can be used to cause the codec to use as much + * rate as it can, which is useful for controlling the rate by adjusting the + * output buffer size. + * @see OPUS_GET_BITRATE + * @param[in] x opus_int32: Bitrate in bits per second. The default + * is determined based on the number of + * channels and the input sampling rate. + * @hideinitializer */ +#define OPUS_SET_BITRATE(x) OPUS_SET_BITRATE_REQUEST, __opus_check_int(x) +/** Gets the encoder's bitrate configuration. + * @see OPUS_SET_BITRATE + * @param[out] x opus_int32 *: Returns the bitrate in bits per second. + * The default is determined based on the + * number of channels and the input + * sampling rate. 
+ * @hideinitializer */ +#define OPUS_GET_BITRATE(x) OPUS_GET_BITRATE_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables variable bitrate (VBR) in the encoder. + * The configured bitrate may not be met exactly because frames must + * be an integer number of bytes in length. + * @warning Only the MDCT mode of Opus can provide hard CBR behavior. + * @see OPUS_GET_VBR + * @see OPUS_SET_VBR_CONSTRAINT + * @param[in] x opus_int32: Allowed values: + *
+ * @li 0: Hard CBR. For LPC/hybrid modes at very low bit-rate, this can
+ *        cause noticeable quality degradation.
+ * @li 1: VBR (default). The exact type of VBR is controlled by
+ *        #OPUS_SET_VBR_CONSTRAINT.
+ * @hideinitializer */ +#define OPUS_SET_VBR(x) OPUS_SET_VBR_REQUEST, __opus_check_int(x) +/** Determine if variable bitrate (VBR) is enabled in the encoder. + * @see OPUS_SET_VBR + * @see OPUS_GET_VBR_CONSTRAINT + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ * @li 0: Hard CBR.
+ * @li 1: VBR (default). The exact type of VBR may be retrieved via
+ *        #OPUS_GET_VBR_CONSTRAINT.
+ * @hideinitializer */ +#define OPUS_GET_VBR(x) OPUS_GET_VBR_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables constrained VBR in the encoder. + * This setting is ignored when the encoder is in CBR mode. + * @warning Only the MDCT mode of Opus currently heeds the constraint. + * Speech mode ignores it completely, hybrid mode may fail to obey it + * if the LPC layer uses more bitrate than the constraint would have + * permitted. + * @see OPUS_GET_VBR_CONSTRAINT + * @see OPUS_SET_VBR + * @param[in] x opus_int32: Allowed values: + *
+ *          0: Unconstrained VBR.
+ *          1: Constrained VBR (default). This creates a maximum of one
+ *             frame of buffering delay assuming a transport with a
+ *             serialization speed of the nominal bitrate.
+ * @hideinitializer */ +#define OPUS_SET_VBR_CONSTRAINT(x) OPUS_SET_VBR_CONSTRAINT_REQUEST, __opus_check_int(x) +/** Determine if constrained VBR is enabled in the encoder. + * @see OPUS_SET_VBR_CONSTRAINT + * @see OPUS_GET_VBR + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          0: Unconstrained VBR.
+ *          1: Constrained VBR (default).
+ * @hideinitializer */ +#define OPUS_GET_VBR_CONSTRAINT(x) OPUS_GET_VBR_CONSTRAINT_REQUEST, __opus_check_int_ptr(x) + +/** Configures mono/stereo forcing in the encoder. + * This can force the encoder to produce packets encoded as either mono or + * stereo, regardless of the format of the input audio. This is useful when + * the caller knows that the input signal is currently a mono source embedded + * in a stereo stream. + * @see OPUS_GET_FORCE_CHANNELS + * @param[in] x opus_int32: Allowed values: + *
+ *          #OPUS_AUTO: Not forced (default)
+ *          1:          Forced mono
+ *          2:          Forced stereo
+ * @hideinitializer */ +#define OPUS_SET_FORCE_CHANNELS(x) OPUS_SET_FORCE_CHANNELS_REQUEST, __opus_check_int(x) +/** Gets the encoder's forced channel configuration. + * @see OPUS_SET_FORCE_CHANNELS + * @param[out] x opus_int32 *: + *
+ *          #OPUS_AUTO: Not forced (default)
+ *          1:          Forced mono
+ *          2:          Forced stereo
+ * @hideinitializer */ +#define OPUS_GET_FORCE_CHANNELS(x) OPUS_GET_FORCE_CHANNELS_REQUEST, __opus_check_int_ptr(x) + +/** Configures the maximum bandpass that the encoder will select automatically. + * Applications should normally use this instead of #OPUS_SET_BANDWIDTH + * (leaving that set to the default, #OPUS_AUTO). This allows the + * application to set an upper bound based on the type of input it is + * providing, but still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_MAX_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ *          OPUS_BANDWIDTH_NARROWBAND:     4 kHz passband
+ *          OPUS_BANDWIDTH_MEDIUMBAND:     6 kHz passband
+ *          OPUS_BANDWIDTH_WIDEBAND:       8 kHz passband
+ *          OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *          OPUS_BANDWIDTH_FULLBAND:      20 kHz passband (default)
+ * @hideinitializer */ +#define OPUS_SET_MAX_BANDWIDTH(x) OPUS_SET_MAX_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Gets the encoder's configured maximum allowed bandpass. + * @see OPUS_SET_MAX_BANDWIDTH + * @param[out] x opus_int32 *: Allowed values: + *
+ *          #OPUS_BANDWIDTH_NARROWBAND:     4 kHz passband
+ *          #OPUS_BANDWIDTH_MEDIUMBAND:     6 kHz passband
+ *          #OPUS_BANDWIDTH_WIDEBAND:       8 kHz passband
+ *          #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *          #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband (default)
+ * @hideinitializer */ +#define OPUS_GET_MAX_BANDWIDTH(x) OPUS_GET_MAX_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/** Sets the encoder's bandpass to a specific value. + * This prevents the encoder from automatically selecting the bandpass based + * on the available bitrate. If an application knows the bandpass of the input + * audio it is providing, it should normally use #OPUS_SET_MAX_BANDWIDTH + * instead, which still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ *          #OPUS_AUTO:                    (default)
+ *          #OPUS_BANDWIDTH_NARROWBAND:     4 kHz passband
+ *          #OPUS_BANDWIDTH_MEDIUMBAND:     6 kHz passband
+ *          #OPUS_BANDWIDTH_WIDEBAND:       8 kHz passband
+ *          #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *          #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband
+ * @hideinitializer */ +#define OPUS_SET_BANDWIDTH(x) OPUS_SET_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Configures the type of signal being encoded. + * This is a hint which helps the encoder's mode selection. + * @see OPUS_GET_SIGNAL + * @param[in] x opus_int32: Allowed values: + *
+ *          #OPUS_AUTO:         (default)
+ *          #OPUS_SIGNAL_VOICE: Bias thresholds towards choosing LPC or Hybrid modes.
+ *          #OPUS_SIGNAL_MUSIC: Bias thresholds towards choosing MDCT modes.
+ * @hideinitializer */ +#define OPUS_SET_SIGNAL(x) OPUS_SET_SIGNAL_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal type. + * @see OPUS_SET_SIGNAL + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          #OPUS_AUTO:         (default)
+ *          #OPUS_SIGNAL_VOICE: Bias thresholds towards choosing LPC or Hybrid modes.
+ *          #OPUS_SIGNAL_MUSIC: Bias thresholds towards choosing MDCT modes.
+ * @hideinitializer */ +#define OPUS_GET_SIGNAL(x) OPUS_GET_SIGNAL_REQUEST, __opus_check_int_ptr(x) + + +/** Configures the encoder's intended application. + * The initial value is a mandatory argument to the encoder_create function. + * @see OPUS_GET_APPLICATION + * @param[in] x opus_int32: Returns one of the following values: + *
+ *          #OPUS_APPLICATION_VOIP:
+ *             Process signal for improved speech intelligibility.
+ *          #OPUS_APPLICATION_AUDIO:
+ *             Favor faithfulness to the original input.
+ *          #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *             Configure the minimum possible coding delay by disabling certain
+ *             modes of operation.
+ * @hideinitializer */ +#define OPUS_SET_APPLICATION(x) OPUS_SET_APPLICATION_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured application. + * @see OPUS_SET_APPLICATION + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          #OPUS_APPLICATION_VOIP:
+ *             Process signal for improved speech intelligibility.
+ *          #OPUS_APPLICATION_AUDIO:
+ *             Favor faithfulness to the original input.
+ *          #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *             Configure the minimum possible coding delay by disabling certain
+ *             modes of operation.
+ * @hideinitializer */ +#define OPUS_GET_APPLICATION(x) OPUS_GET_APPLICATION_REQUEST, __opus_check_int_ptr(x) + +/** Gets the sampling rate the encoder or decoder was initialized with. + * This simply returns the Fs value passed to opus_encoder_init() + * or opus_decoder_init(). + * @param[out] x opus_int32 *: Sampling rate of encoder or decoder. + * @hideinitializer + */ +#define OPUS_GET_SAMPLE_RATE(x) OPUS_GET_SAMPLE_RATE_REQUEST, __opus_check_int_ptr(x) + +/** Gets the total samples of delay added by the entire codec. + * This can be queried by the encoder and then the provided number of samples can be + * skipped on from the start of the decoder's output to provide time aligned input + * and output. From the perspective of a decoding application the real data begins this many + * samples late. + * + * The decoder contribution to this delay is identical for all decoders, but the + * encoder portion of the delay may vary from implementation to implementation, + * version to version, or even depend on the encoder's initial configuration. + * Applications needing delay compensation should call this CTL rather than + * hard-coding a value. + * @param[out] x opus_int32 *: Number of lookahead samples + * @hideinitializer */ +#define OPUS_GET_LOOKAHEAD(x) OPUS_GET_LOOKAHEAD_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of inband forward error correction (FEC). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_INBAND_FEC + * @param[in] x opus_int32: Allowed values: + *
+ *          0: Disable inband FEC (default).
+ *          1: Enable inband FEC.
+ * @hideinitializer */ +#define OPUS_SET_INBAND_FEC(x) OPUS_SET_INBAND_FEC_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of inband forward error correction. + * @see OPUS_SET_INBAND_FEC + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          0: Inband FEC disabled (default).
+ *          1: Inband FEC enabled.
+ * @hideinitializer */ +#define OPUS_GET_INBAND_FEC(x) OPUS_GET_INBAND_FEC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's expected packet loss percentage. + * Higher values with trigger progressively more loss resistant behavior in the encoder + * at the expense of quality at a given bitrate in the lossless case, but greater quality + * under loss. + * @see OPUS_GET_PACKET_LOSS_PERC + * @param[in] x opus_int32: Loss percentage in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_SET_PACKET_LOSS_PERC(x) OPUS_SET_PACKET_LOSS_PERC_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured packet loss percentage. + * @see OPUS_SET_PACKET_LOSS_PERC + * @param[out] x opus_int32 *: Returns the configured loss percentage + * in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_GET_PACKET_LOSS_PERC(x) OPUS_GET_PACKET_LOSS_PERC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of discontinuous transmission (DTX). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_DTX + * @param[in] x opus_int32: Allowed values: + *
+ *          0: Disable DTX (default).
+ *          1: Enable DTX.
+ * @hideinitializer */ +#define OPUS_SET_DTX(x) OPUS_SET_DTX_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of discontinuous transmission. + * @see OPUS_SET_DTX + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          0: DTX disabled (default).
+ *          1: DTX enabled.
+ * @hideinitializer */ +#define OPUS_GET_DTX(x) OPUS_GET_DTX_REQUEST, __opus_check_int_ptr(x) +/** Configures the depth of signal being encoded. + * This is a hint which helps the encoder identify silence and near-silence. + * @see OPUS_GET_LSB_DEPTH + * @param[in] x opus_int32: Input precision in bits, between 8 and 24 + * (default: 24). + * @hideinitializer */ +#define OPUS_SET_LSB_DEPTH(x) OPUS_SET_LSB_DEPTH_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal depth. + * @see OPUS_SET_LSB_DEPTH + * @param[out] x opus_int32 *: Input precision in bits, between 8 and + * 24 (default: 24). + * @hideinitializer */ +#define OPUS_GET_LSB_DEPTH(x) OPUS_GET_LSB_DEPTH_REQUEST, __opus_check_int_ptr(x) + +/** Gets the duration (in samples) of the last packet successfully decoded or concealed. + * @param[out] x opus_int32 *: Number of samples (at current sampling rate). + * @hideinitializer */ +#define OPUS_GET_LAST_PACKET_DURATION(x) OPUS_GET_LAST_PACKET_DURATION_REQUEST, __opus_check_int_ptr(x) +/**@}*/ + +/** @defgroup opus_genericctls Generic CTLs + * + * These macros are used with the \c opus_decoder_ctl and + * \c opus_encoder_ctl calls to generate a particular + * request. + * + * When called on an \c OpusDecoder they apply to that + * particular decoder instance. When called on an + * \c OpusEncoder they apply to the corresponding setting + * on that encoder instance, if present. + * + * Some usage examples: + * + * @code + * int ret; + * opus_int32 pitch; + * ret = opus_decoder_ctl(dec_ctx, OPUS_GET_PITCH(&pitch)); + * if (ret == OPUS_OK) return ret; + * + * opus_encoder_ctl(enc_ctx, OPUS_RESET_STATE); + * opus_decoder_ctl(dec_ctx, OPUS_RESET_STATE); + * + * opus_int32 enc_bw, dec_bw; + * opus_encoder_ctl(enc_ctx, OPUS_GET_BANDWIDTH(&enc_bw)); + * opus_decoder_ctl(dec_ctx, OPUS_GET_BANDWIDTH(&dec_bw)); + * if (enc_bw != dec_bw) { + * printf("packet bandwidth mismatch!\n"); + * } + * @endcode + * + * @see opus_encoder, opus_decoder_ctl, opus_encoder_ctl, opus_decoderctls, opus_encoderctls + * @{ + */ + +/** Resets the codec state to be equivalent to a freshly initialized state. + * This should be called when switching streams in order to prevent + * the back to back decoding from giving different results from + * one at a time decoding. + * @hideinitializer */ +#define OPUS_RESET_STATE 4028 + +/** Gets the final state of the codec's entropy coder. + * This is used for testing purposes, + * The encoder and decoder state should be identical after coding a payload + * (assuming no data corruption or software bugs) + * + * @param[out] x opus_uint32 *: Entropy coder state + * + * @hideinitializer */ +#define OPUS_GET_FINAL_RANGE(x) OPUS_GET_FINAL_RANGE_REQUEST, __opus_check_uint_ptr(x) + +/** Gets the pitch of the last decoded frame, if available. + * This can be used for any post-processing algorithm requiring the use of pitch, + * e.g. time stretching/shortening. If the last frame was not voiced, or if the + * pitch was not coded in the frame, then zero is returned. + * + * This CTL is only implemented for decoder instances. + * + * @param[out] x opus_int32 *: pitch period at 48 kHz (or 0 if not available) + * + * @hideinitializer */ +#define OPUS_GET_PITCH(x) OPUS_GET_PITCH_REQUEST, __opus_check_int_ptr(x) + +/** Gets the encoder's configured bandpass or the decoder's last bandpass. + * @see OPUS_SET_BANDWIDTH + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *          #OPUS_AUTO:                    (default)
+ *          #OPUS_BANDWIDTH_NARROWBAND:     4 kHz passband
+ *          #OPUS_BANDWIDTH_MEDIUMBAND:     6 kHz passband
+ *          #OPUS_BANDWIDTH_WIDEBAND:       8 kHz passband
+ *          #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *          #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband
+ * @hideinitializer */ +#define OPUS_GET_BANDWIDTH(x) OPUS_GET_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_decoderctls Decoder related CTLs + * @see opus_genericctls, opus_encoderctls, opus_decoder + * @{ + */ + +/** Configures decoder gain adjustment. + * Scales the decoded output by a factor specified in Q8 dB units. + * This has a maximum range of -32768 to 32767 inclusive, and returns + * OPUS_BAD_ARG otherwise. The default is zero indicating no adjustment. + * This setting survives decoder reset. + * + * gain = pow(10, x/(20.0*256)) + * + * @param[in] x opus_int32: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_SET_GAIN(x) OPUS_SET_GAIN_REQUEST, __opus_check_int(x) +/** Gets the decoder's configured gain adjustment. @see OPUS_SET_GAIN + * + * @param[out] x opus_int32 *: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_GET_GAIN(x) OPUS_GET_GAIN_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_libinfo Opus library information functions + * @{ + */ + +/** Converts an opus error code into a human readable string. + * + * @param[in] error int: Error number + * @returns Error string + */ +OPUS_EXPORT const char *opus_strerror(int error); + +/** Gets the libopus version string. + * + * @returns Version string + */ +OPUS_EXPORT const char *opus_get_version_string(void); +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_DEFINES_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_multistream.h b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_multistream.h new file mode 100644 index 0000000..ae59979 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_multistream.h @@ -0,0 +1,660 @@ +/* Copyright (c) 2011 Xiph.Org Foundation + Written by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus_multistream.h + * @brief Opus reference implementation multistream API + */ + +#ifndef OPUS_MULTISTREAM_H +#define OPUS_MULTISTREAM_H + +#include "opus.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond OPUS_INTERNAL_DOC */ + +/** Macros to trigger compilation errors when the wrong types are provided to a + * CTL. 
*/ +/**@{*/ +#define __opus_check_encstate_ptr(ptr) ((ptr) + ((ptr) - (OpusEncoder**)(ptr))) +#define __opus_check_decstate_ptr(ptr) ((ptr) + ((ptr) - (OpusDecoder**)(ptr))) +/**@}*/ + +/** These are the actual encoder and decoder CTL ID numbers. + * They should not be used directly by applications. + * In general, SETs should be even and GETs should be odd.*/ +/**@{*/ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST 5120 +#define OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST 5122 +/**@}*/ + +/** @endcond */ + +/** @defgroup opus_multistream_ctls Multistream specific encoder and decoder CTLs + * + * These are convenience macros that are specific to the + * opus_multistream_encoder_ctl() and opus_multistream_decoder_ctl() + * interface. + * The CTLs from @ref opus_genericctls, @ref opus_encoderctls, and + * @ref opus_decoderctls may be applied to a multistream encoder or decoder as + * well. + * In addition, you may retrieve the encoder or decoder state for an specific + * stream via #OPUS_MULTISTREAM_GET_ENCODER_STATE or + * #OPUS_MULTISTREAM_GET_DECODER_STATE and apply CTLs to it individually. + */ +/**@{*/ + +/** Gets the encoder state for an individual stream of a multistream encoder. + * @param[in] x opus_int32: The index of the stream whose encoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the encoder. + * @param[out] y OpusEncoder**: Returns a pointer to the given + * encoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. + * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE(x,y) OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST, __opus_check_int(x), __opus_check_encstate_ptr(y) + +/** Gets the decoder state for an individual stream of a multistream decoder. + * @param[in] x opus_int32: The index of the stream whose decoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the decoder. + * @param[out] y OpusDecoder**: Returns a pointer to the given + * decoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. + * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_DECODER_STATE(x,y) OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST, __opus_check_int(x), __opus_check_decstate_ptr(y) + +/**@}*/ + +/** @defgroup opus_multistream Opus Multistream API + * @{ + * + * The multistream API allows individual Opus streams to be combined into a + * single packet, enabling support for up to 255 channels. Unlike an + * elementary Opus stream, the encoder and decoder must negotiate the channel + * configuration before the decoder can successfully interpret the data in the + * packets produced by the encoder. Some basic information, such as packet + * duration, can be computed without any special negotiation. + * + * The format for multistream Opus packets is defined in the + * Ogg + * encapsulation specification and is based on the self-delimited Opus + * framing described in Appendix B of RFC 6716. + * Normal Opus packets are just a degenerate case of multistream Opus packets, + * and can be encoded or decoded with the multistream API by setting + * streams to 1 when initializing the encoder or + * decoder. + * + * Multistream Opus streams can contain up to 255 elementary Opus streams. + * These may be either "uncoupled" or "coupled", indicating that the decoder + * is configured to decode them to either 1 or 2 channels, respectively. 
+ * The streams are ordered so that all coupled streams appear at the + * beginning. + * + * A mapping table defines which decoded channel i + * should be used for each input/output (I/O) channel j. This table is + * typically provided as an unsigned char array. + * Let i = mapping[j] be the index for I/O channel j. + * If i < 2*coupled_streams, then I/O channel j is + * encoded as the left channel of stream (i/2) if i + * is even, or as the right channel of stream (i/2) if + * i is odd. Otherwise, I/O channel j is encoded as + * mono in stream (i - coupled_streams), unless it has the special + * value 255, in which case it is omitted from the encoding entirely (the + * decoder will reproduce it as silence). Each value i must either + * be the special value 255 or be less than streams + coupled_streams. + * + * The output channels specified by the encoder + * should use the + * Vorbis + * channel ordering. A decoder may wish to apply an additional permutation + * to the mapping the encoder used to achieve a different output channel + * order (e.g. for outputing in WAV order). + * + * Each multistream packet contains an Opus packet for each stream, and all of + * the Opus packets in a single multistream packet must have the same + * duration. Therefore the duration of a multistream packet can be extracted + * from the TOC sequence of the first stream, which is located at the + * beginning of the packet, just like an elementary Opus stream: + * + * @code + * int nb_samples; + * int nb_frames; + * nb_frames = opus_packet_get_nb_frames(data, len); + * if (nb_frames < 1) + * return nb_frames; + * nb_samples = opus_packet_get_samples_per_frame(data, 48000) * nb_frames; + * @endcode + * + * The general encoding and decoding process proceeds exactly the same as in + * the normal @ref opus_encoder and @ref opus_decoder APIs. + * See their documentation for an overview of how to use the corresponding + * multistream functions. + */ + +/** Opus multistream encoder state. + * This contains the complete state of a multistream Opus encoder. + * It is position independent and can be freely copied. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_init + */ +typedef struct OpusMSEncoder OpusMSEncoder; + +/** Opus multistream decoder state. + * This contains the complete state of a multistream Opus decoder. + * It is position independent and can be freely copied. + * @see opus_multistream_decoder_create + * @see opus_multistream_decoder_init + */ +typedef struct OpusMSDecoder OpusMSDecoder; + +/**\name Multistream encoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSEncoder structure. + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_encoder_get_size( + int streams, + int coupled_streams +); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_surround_encoder_get_size( + int channels, + int mapping_family +); + + +/** Allocates and initializes a multistream encoder state. 
+ * Call opus_multistream_encoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ *          #OPUS_APPLICATION_VOIP:
+ *             Process signal for improved speech intelligibility.
+ *          #OPUS_APPLICATION_AUDIO:
+ *             Favor faithfulness to the original input.
+ *          #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *             Configure the minimum possible coding delay by disabling certain
+ *             modes of operation.
+ * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_encoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(5); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_surround_encoder_create( + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(5); + +/** Initialize a previously allocated multistream encoder state. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ *          #OPUS_APPLICATION_VOIP:
+ *             Process signal for improved speech intelligibility.
+ *          #OPUS_APPLICATION_AUDIO:
+ *             Favor faithfulness to the original input.
+ *          #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *             Configure the minimum possible coding delay by disabling certain
+ *             modes of operation.
+ * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +OPUS_EXPORT int opus_multistream_surround_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +/** Encodes a multistream Opus frame. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const opus_int16*: The input signal as interleaved + * samples. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode( + OpusMSEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes a multistream Opus frame from floating point input. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const float*: The input signal as interleaved + * samples with a normal range of + * +/-1.0. + * Samples with a range beyond +/-1.0 + * are supported but will be clipped by + * decoders using the integer API and + * should only be used if it is known + * that the far end supports extended + * dynamic range. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
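+ *
+ * As an illustrative sketch only (assuming \a st was created for a single
+ * coupled stereo stream and \a pcm holds one 20 ms frame at 48 kHz):
+ * @code
+ * int len;
+ * len = opus_multistream_encode_float(st, pcm, 960, data, max_data_bytes);
+ * if (len < 0)
+ *     return len;   // negative values are error codes (see opus_errorcodes)
+ * @endcode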
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode_float( + OpusMSEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusMSEncoder allocated by + * opus_multistream_encoder_create(). + * @param st OpusMSEncoder*: Multistream encoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_encoder_destroy(OpusMSEncoder *st); + +/** Perform a CTL function on a multistream Opus encoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_encoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_encoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_encoder_ctl(OpusMSEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/**@}*/ + +/**\name Multistream decoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSDecoder structure. + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_decoder_get_size( + int streams, + int coupled_streams +); + +/** Allocates and initializes a multistream decoder state. + * Call opus_multistream_decoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSDecoder *opus_multistream_decoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int *error +) OPUS_ARG_NONNULL(5); + +/** Intialize a previously allocated decoder state object. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. 
+ * @see opus_multistream_decoder_create + * @see opus_multistream_deocder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_decoder_init( + OpusMSDecoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +/** Decode a multistream Opus packet. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode a multistream Opus packet with floating point output. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. 
In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode_float( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on a multistream Opus decoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_decoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_decoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_decoder_ctl(OpusMSDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusMSDecoder allocated by + * opus_multistream_decoder_create(). + * @param st OpusMSDecoder: Multistream decoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_decoder_destroy(OpusMSDecoder *st); + +/**@}*/ + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_MULTISTREAM_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_types.h b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_types.h new file mode 100644 index 0000000..b28e03a --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/include/opus/opus_types.h @@ -0,0 +1,159 @@ +/* (C) COPYRIGHT 1994-2002 Xiph.Org Foundation */ +/* Modified by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +/* opus_types.h based on ogg_types.h from libogg */ + +/** + @file opus_types.h + @brief Opus reference implementation types +*/ +#ifndef OPUS_TYPES_H +#define OPUS_TYPES_H + +/* Use the real stdint.h if it's there (taken from Paul Hsieh's pstdint.h) */ +#if (defined(__STDC__) && __STDC__ && __STDC_VERSION__ >= 199901L) || (defined(__GNUC__) && (defined(_STDINT_H) || defined(_STDINT_H_)) || defined (HAVE_STDINT_H)) +#include + + typedef int16_t opus_int16; + typedef uint16_t opus_uint16; + typedef int32_t opus_int32; + typedef uint32_t opus_uint32; +#elif defined(_WIN32) + +# if defined(__CYGWIN__) +# include <_G_config.h> + typedef _G_int32_t opus_int32; + typedef _G_uint32_t opus_uint32; + typedef _G_int16 opus_int16; + typedef _G_uint16 opus_uint16; +# elif defined(__MINGW32__) + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; +# elif defined(__MWERKS__) + typedef int opus_int32; + typedef unsigned int opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; +# else + /* MSVC/Borland */ + typedef __int32 opus_int32; + typedef unsigned __int32 opus_uint32; + typedef __int16 opus_int16; + typedef unsigned __int16 opus_uint16; +# endif + +#elif defined(__MACOS__) + +# include + typedef SInt16 opus_int16; + typedef UInt16 opus_uint16; + typedef SInt32 opus_int32; + typedef UInt32 opus_uint32; + +#elif (defined(__APPLE__) && defined(__MACH__)) /* MacOS X Framework build */ + +# include + typedef int16_t opus_int16; + typedef u_int16_t opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined(__BEOS__) + + /* Be */ +# include + typedef int16 opus_int16; + typedef u_int16 opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined (__EMX__) + + /* OS/2 GCC */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined (DJGPP) + + /* DJGPP */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(R5900) + + /* PS2 EE */ + typedef int opus_int32; + typedef unsigned opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; + +#elif defined(__SYMBIAN32__) + + /* Symbian GCC */ + typedef signed short opus_int16; + typedef unsigned short opus_uint16; + typedef signed int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(CONFIG_TI_C54X) || defined (CONFIG_TI_C55X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef long opus_int32; + typedef unsigned long opus_uint32; + +#elif defined(CONFIG_TI_C6X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#else + + /* Give up, take a reasonable guess */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#endif + +#define opus_int int /* used for counters etc; at least 16 bits */ +#define opus_int64 long long +#define opus_int8 signed char + +#define opus_uint unsigned int /* used for counters etc; at least 16 bits */ +#define opus_uint64 unsigned long long +#define opus_uint8 unsigned char + +#endif /* OPUS_TYPES_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.a b/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.a new file mode 100644 index 0000000..91f313d Binary files /dev/null 
and b/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.a differ diff --git a/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.la b/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.la new file mode 100755 index 0000000..499dec8 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/lib/libopus.la @@ -0,0 +1,41 @@ +# libopus.la - a libtool library file +# Generated by libtool (GNU libtool) 2.4.2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='' + +# Names of this library. +library_names='' + +# The name of the static archive. +old_library='libopus.a' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L=/usr/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libopus. +current=4 +age=4 +revision=0 + +# Is this an already installed library? +installed=yes + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='/Users/diegowaxemberg/Downloads/opus-ios-build-master/opus-ios-build/dist-armv7/lib' diff --git a/Limelight-iOS/libs/opus/dist-armv7/lib/pkgconfig/opus.pc b/Limelight-iOS/libs/opus/dist-armv7/lib/pkgconfig/opus.pc new file mode 100644 index 0000000..01d19f3 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7/lib/pkgconfig/opus.pc @@ -0,0 +1,16 @@ +# Opus codec reference implementation pkg-config file + +prefix=/Users/diegowaxemberg/Downloads/opus-ios-build-master/opus-ios-build/dist-armv7 +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include + +Name: Opus +Description: Opus IETF audio codec (floating-point build) +URL: http://opus-codec.org/ +Version: unknown +Requires: +Conflicts: +Libs: -L${libdir} -lopus +Libs.private: +Cflags: -I${includedir}/opus diff --git a/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus.h b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus.h new file mode 100644 index 0000000..ce86038 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus.h @@ -0,0 +1,906 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus.h + * @brief Opus reference implementation API + */ + +#ifndef OPUS_H +#define OPUS_H + +#include "opus_types.h" +#include "opus_defines.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @mainpage Opus + * + * The Opus codec is designed for interactive speech and audio transmission over the Internet. + * It is designed by the IETF Codec Working Group and incorporates technology from + * Skype's SILK codec and Xiph.Org's CELT codec. + * + * The Opus codec is designed to handle a wide range of interactive audio applications, + * including Voice over IP, videoconferencing, in-game chat, and even remote live music + * performances. It can scale from low bit-rate narrowband speech to very high quality + * stereo music. Its main features are: + + * @li Sampling rates from 8 to 48 kHz + * @li Bit-rates from 6 kb/s to 510 kb/s + * @li Support for both constant bit-rate (CBR) and variable bit-rate (VBR) + * @li Audio bandwidth from narrowband to full-band + * @li Support for speech and music + * @li Support for mono and stereo + * @li Support for multichannel (up to 255 channels) + * @li Frame sizes from 2.5 ms to 60 ms + * @li Good loss robustness and packet loss concealment (PLC) + * @li Floating point and fixed-point implementation + * + * Documentation sections: + * @li @ref opus_encoder + * @li @ref opus_decoder + * @li @ref opus_repacketizer + * @li @ref opus_multistream + * @li @ref opus_libinfo + * @li @ref opus_custom + */ + +/** @defgroup opus_encoder Opus Encoder + * @{ + * + * @brief This page describes the process and functions used to encode Opus. + * + * Since Opus is a stateful codec, the encoding process starts with creating an encoder + * state. This can be done with: + * + * @code + * int error; + * OpusEncoder *enc; + * enc = opus_encoder_create(Fs, channels, application, &error); + * @endcode + * + * From this point, @c enc can be used for encoding an audio stream. An encoder state + * @b must @b not be used for more than one stream at the same time. Similarly, the encoder + * state @b must @b not be re-initialized for each frame. + * + * While opus_encoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * + * @code + * int size; + * int error; + * OpusEncoder *enc; + * size = opus_encoder_get_size(channels); + * enc = malloc(size); + * error = opus_encoder_init(enc, Fs, channels, application); + * @endcode + * + * where opus_encoder_get_size() returns the required size for the encoder state. Note that + * future versions of this code may change the size, so no assuptions should be made about it. + * + * The encoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. memcpy()) + * + * It is possible to change some of the encoder's settings using the opus_encoder_ctl() + * interface. All these settings already default to the recommended value, so they should + * only be changed when necessary. 
The most common settings one may want to change are: + * + * @code + * opus_encoder_ctl(enc, OPUS_SET_BITRATE(bitrate)); + * opus_encoder_ctl(enc, OPUS_SET_COMPLEXITY(complexity)); + * opus_encoder_ctl(enc, OPUS_SET_SIGNAL(signal_type)); + * @endcode + * + * where + * + * @arg bitrate is in bits per second (b/s) + * @arg complexity is a value from 1 to 10, where 1 is the lowest complexity and 10 is the highest + * @arg signal_type is either OPUS_AUTO (default), OPUS_SIGNAL_VOICE, or OPUS_SIGNAL_MUSIC + * + * See @ref opus_encoderctls and @ref opus_genericctls for a complete list of parameters that can be set or queried. Most parameters can be set or changed at any time during a stream. + * + * To encode a frame, opus_encode() or opus_encode_float() must be called with exactly one frame (2.5, 5, 10, 20, 40 or 60 ms) of audio data: + * @code + * len = opus_encode(enc, audio_frame, frame_size, packet, max_packet); + * @endcode + * + * where + *
+ * @li audio_frame is the audio data in opus_int16 (or float for opus_encode_float())
+ * @li frame_size is the duration of the frame in samples (per channel)
+ * @li packet is the byte array to which the compressed data is written
+ * @li max_packet is the maximum number of bytes that can be written in the packet
+ *     (4000 bytes is recommended). Do not use max_packet to control VBR target
+ *     bitrate, instead use the #OPUS_SET_BITRATE CTL.
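+ *
+ * As an illustrative sketch only (not part of the API), the pieces above might
+ * be combined as follows, assuming 48 kHz stereo input, 20 ms frames, and
+ * hypothetical read_audio_frame() and send_packet() helpers:
+ *
+ * @code
+ * #define FRAME_SIZE 960        // 20 ms at 48 kHz
+ * #define MAX_PACKET 4000       // recommended max_packet size
+ *
+ * opus_int16 audio_frame[FRAME_SIZE * 2];   // interleaved stereo
+ * unsigned char packet[MAX_PACKET];
+ * opus_int32 len;
+ *
+ * while (read_audio_frame(audio_frame, FRAME_SIZE)) {
+ *     len = opus_encode(enc, audio_frame, FRAME_SIZE, packet, MAX_PACKET);
+ *     if (len < 0)
+ *         break;                // a negative return value is an error code
+ *     if (len > 1)
+ *         send_packet(packet, len);   // 1-byte packets need not be sent (DTX)
+ * }
+ * @endcode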
+ * + * opus_encode() and opus_encode_float() return the number of bytes actually written to the packet. + * The return value can be negative, which indicates that an error has occurred. If the return value + * is 1 byte, then the packet does not need to be transmitted (DTX). + * + * Once the encoder state if no longer needed, it can be destroyed with + * + * @code + * opus_encoder_destroy(enc); + * @endcode + * + * If the encoder was created with opus_encoder_init() rather than opus_encoder_create(), + * then no action is required aside from potentially freeing the memory that was manually + * allocated for it (calling free(enc) for the example above) + * + */ + +/** Opus encoder state. + * This contains the complete state of an Opus encoder. + * It is position independent and can be freely copied. + * @see opus_encoder_create,opus_encoder_init + */ +typedef struct OpusEncoder OpusEncoder; + +/** Gets the size of an OpusEncoder structure. + * @param[in] channels int: Number of channels. + * This must be 1 or 2. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_encoder_get_size(int channels); + +/** + */ + +/** Allocates and initializes an encoder state. + * There are three coding modes: + * + * @ref OPUS_APPLICATION_VOIP gives best quality at a given bitrate for voice + * signals. It enhances the input signal by high-pass filtering and + * emphasizing formants and harmonics. Optionally it includes in-band + * forward error correction to protect against packet loss. Use this + * mode for typical VoIP applications. Because of the enhancement, + * even at high bitrates the output may sound different from the input. + * + * @ref OPUS_APPLICATION_AUDIO gives best quality at a given bitrate for most + * non-voice signals like music. Use this mode for music and mixed + * (music/voice) content, broadcast, and applications requiring less + * than 15 ms of coding delay. + * + * @ref OPUS_APPLICATION_RESTRICTED_LOWDELAY configures low-delay mode that + * disables the speech-optimized mode in exchange for slightly reduced delay. + * This mode can only be set on an newly initialized or freshly reset encoder + * because it changes the codec delay. + * + * This is useful when the caller knows that the speech-optimized modes will not be needed (use with caution). + * @param [in] Fs opus_int32: Sampling rate of input signal (Hz) + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) in input signal + * @param [in] application int: Coding mode (@ref OPUS_APPLICATION_VOIP/@ref OPUS_APPLICATION_AUDIO/@ref OPUS_APPLICATION_RESTRICTED_LOWDELAY) + * @param [out] error int*: @ref opus_errorcodes + * @note Regardless of the sampling rate and number channels selected, the Opus encoder + * can switch to a lower audio bandwidth or number of channels if the bitrate + * selected is too low. This also means that it is safe to always use 48 kHz stereo input + * and let the encoder optimize the encoding. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusEncoder *opus_encoder_create( + opus_int32 Fs, + int channels, + int application, + int *error +); + +/** Initializes a previously allocated encoder state + * The memory pointed to by st must be at least the size returned by opus_encoder_get_size(). + * This is intended for applications which use their own allocator instead of malloc. + * @see opus_encoder_create(),opus_encoder_get_size() + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. 
+ * @param [in] st OpusEncoder*: Encoder state + * @param [in] Fs opus_int32: Sampling rate of input signal (Hz) + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) in input signal + * @param [in] application int: Coding mode (OPUS_APPLICATION_VOIP/OPUS_APPLICATION_AUDIO/OPUS_APPLICATION_RESTRICTED_LOWDELAY) + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_encoder_init( + OpusEncoder *st, + opus_int32 Fs, + int channels, + int application +) OPUS_ARG_NONNULL(1); + +/** Encodes an Opus frame. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm opus_int16*: Input signal (interleaved if 2 channels). length is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode( + OpusEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes an Opus frame from floating point input. + * @param [in] st OpusEncoder*: Encoder state + * @param [in] pcm float*: Input in float format (interleaved if 2 channels), with a normal range of +/-1.0. + * Samples with a range beyond +/-1.0 are supported but will + * be clipped by decoders using the integer API and should + * only be used if it is known that the far end supports + * extended dynamic range. + * length is frame_size*channels*sizeof(float) + * @param [in] frame_size int: Number of samples per channel in the + * input signal. + * This must be an Opus frame size for + * the encoder's sampling rate. + * For example, at 48 kHz the permitted + * values are 120, 240, 480, 960, 1920, + * and 2880. + * Passing in a duration of less than + * 10 ms (480 samples at 48 kHz) will + * prevent the encoder from using the LPC + * or hybrid modes. + * @param [out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_encode_float( + OpusEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusEncoder allocated by opus_encoder_create(). + * @param[in] st OpusEncoder*: State to be freed. + */ +OPUS_EXPORT void opus_encoder_destroy(OpusEncoder *st); + +/** Perform a CTL function on an Opus encoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusEncoder*: Encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_encoderctls. + * @see opus_genericctls + * @see opus_encoderctls + */ +OPUS_EXPORT int opus_encoder_ctl(OpusEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); +/**@}*/ + +/** @defgroup opus_decoder Opus Decoder + * @{ + * + * @brief This page describes the process and functions used to decode Opus. + * + * The decoding process also starts with creating a decoder + * state. This can be done with: + * @code + * int error; + * OpusDecoder *dec; + * dec = opus_decoder_create(Fs, channels, &error); + * @endcode + * where + * @li Fs is the sampling rate and must be 8000, 12000, 16000, 24000, or 48000 + * @li channels is the number of channels (1 or 2) + * @li error will hold the error code in case of failure (or #OPUS_OK on success) + * @li the return value is a newly created decoder state to be used for decoding + * + * While opus_decoder_create() allocates memory for the state, it's also possible + * to initialize pre-allocated memory: + * @code + * int size; + * int error; + * OpusDecoder *dec; + * size = opus_decoder_get_size(channels); + * dec = malloc(size); + * error = opus_decoder_init(dec, Fs, channels); + * @endcode + * where opus_decoder_get_size() returns the required size for the decoder state. Note that + * future versions of this code may change the size, so no assuptions should be made about it. + * + * The decoder state is always continuous in memory and only a shallow copy is sufficient + * to copy it (e.g. memcpy()) + * + * To decode a frame, opus_decode() or opus_decode_float() must be called with a packet of compressed audio data: + * @code + * frame_size = opus_decode(dec, packet, len, decoded, max_size, 0); + * @endcode + * where + * + * @li packet is the byte array containing the compressed data + * @li len is the exact number of bytes contained in the packet + * @li decoded is the decoded audio data in opus_int16 (or float for opus_decode_float()) + * @li max_size is the max duration of the frame in samples (per channel) that can fit into the decoded_frame array + * + * opus_decode() and opus_decode_float() return the number of samples (per channel) decoded from the packet. + * If that value is negative, then an error has occurred. This can occur if the packet is corrupted or if the audio + * buffer is too small to hold the decoded audio. + * + * Opus is a stateful codec with overlapping blocks and as a result Opus + * packets are not coded independently of each other. Packets must be + * passed into the decoder serially and in the correct order for a correct + * decode. Lost packets can be replaced with loss concealment by calling + * the decoder with a null pointer and zero length for the missing packet. 
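+ *
+ * As an illustrative sketch (reusing the names from the example above and
+ * assuming exactly one 20 ms frame at 48 kHz was lost, with \c decoded having
+ * room for at least 960 samples per channel), concealment can be requested
+ * with:
+ * @code
+ * frame_size = opus_decode(dec, NULL, 0, decoded, 960, 0);
+ * @endcode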
+ * + * A single codec state may only be accessed from a single thread at + * a time and any required locking must be performed by the caller. Separate + * streams must be decoded with separate decoder states and can be decoded + * in parallel unless the library was compiled with NONTHREADSAFE_PSEUDOSTACK + * defined. + * + */ + +/** Opus decoder state. + * This contains the complete state of an Opus decoder. + * It is position independent and can be freely copied. + * @see opus_decoder_create,opus_decoder_init + */ +typedef struct OpusDecoder OpusDecoder; + +/** Gets the size of an OpusDecoder structure. + * @param [in] channels int: Number of channels. + * This must be 1 or 2. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_size(int channels); + +/** Allocates and initializes a decoder state. + * @param [in] Fs opus_int32: Sample rate to decode at (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @param [out] error int*: #OPUS_OK Success or @ref opus_errorcodes + * + * Internally Opus stores data at 48000 Hz, so that should be the default + * value for Fs. However, the decoder can efficiently decode to buffers + * at 8, 12, 16, and 24 kHz so if for some reason the caller cannot use + * data at the full sample rate, or knows the compressed data doesn't + * use the full frequency range, it can request decoding at a reduced + * rate. Likewise, the decoder is capable of filling in either mono or + * interleaved stereo pcm buffers, at the caller's request. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusDecoder *opus_decoder_create( + opus_int32 Fs, + int channels, + int *error +); + +/** Initializes a previously allocated decoder state. + * The state must be at least the size returned by opus_decoder_get_size(). + * This is intended for applications which use their own allocator instead of malloc. @see opus_decoder_create,opus_decoder_get_size + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @param [in] st OpusDecoder*: Decoder state. + * @param [in] Fs opus_int32: Sampling rate to decode to (Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param [in] channels int: Number of channels (1 or 2) to decode + * @retval #OPUS_OK Success or @ref opus_errorcodes + */ +OPUS_EXPORT int opus_decoder_init( + OpusDecoder *st, + opus_int32 Fs, + int channels +) OPUS_ARG_NONNULL(1); + +/** Decode an Opus packet. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload* + * @param [out] pcm opus_int16*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(opus_int16) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. 
If no such data is available, the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode an Opus packet with floating point output. + * @param [in] st OpusDecoder*: Decoder state + * @param [in] data char*: Input payload. Use a NULL pointer to indicate packet loss + * @param [in] len opus_int32: Number of bytes in payload + * @param [out] pcm float*: Output signal (interleaved if 2 channels). length + * is frame_size*channels*sizeof(float) + * @param [in] frame_size Number of samples per channel of available space in \a pcm. + * If this is less than the maximum packet duration (120ms; 5760 for 48kHz), this function will + * not be capable of decoding some packets. In the case of PLC (data==NULL) or FEC (decode_fec=1), + * then frame_size needs to be exactly the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the next incoming packet. For the PLC and + * FEC cases, frame_size must be a multiple of 2.5 ms. + * @param [in] decode_fec int: Flag (0 or 1) to request that any in-band forward error correction data be + * decoded. If no such data is available the frame is decoded as if it were lost. + * @returns Number of decoded samples or @ref opus_errorcodes + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decode_float( + OpusDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on an Opus decoder. + * + * Generally the request and subsequent arguments are generated + * by a convenience macro. + * @param st OpusDecoder*: Decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls or + * @ref opus_decoderctls. + * @see opus_genericctls + * @see opus_decoderctls + */ +OPUS_EXPORT int opus_decoder_ctl(OpusDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusDecoder allocated by opus_decoder_create(). + * @param[in] st OpusDecoder*: State to be freed. + */ +OPUS_EXPORT void opus_decoder_destroy(OpusDecoder *st); + +/** Parse an opus packet into one or more frames. + * Opus_decode will perform this operation internally so most applications do + * not need to use this function. + * This function does not copy the frames, the returned pointers are pointers into + * the input packet. + * @param [in] data char*: Opus packet to be parsed + * @param [in] len opus_int32: size of data + * @param [out] out_toc char*: TOC pointer + * @param [out] frames char*[48] encapsulated frames + * @param [out] size opus_int16[48] sizes of the encapsulated frames + * @param [out] payload_offset int*: returns the position of the payload within the packet (in bytes) + * @returns number of frames + */ +OPUS_EXPORT int opus_packet_parse( + const unsigned char *data, + opus_int32 len, + unsigned char *out_toc, + const unsigned char *frames[48], + opus_int16 size[48], + int *payload_offset +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Gets the bandwidth of an Opus packet. 
+ * @param [in] data char*: Opus packet + * @retval OPUS_BANDWIDTH_NARROWBAND Narrowband (4kHz bandpass) + * @retval OPUS_BANDWIDTH_MEDIUMBAND Mediumband (6kHz bandpass) + * @retval OPUS_BANDWIDTH_WIDEBAND Wideband (8kHz bandpass) + * @retval OPUS_BANDWIDTH_SUPERWIDEBAND Superwideband (12kHz bandpass) + * @retval OPUS_BANDWIDTH_FULLBAND Fullband (20kHz bandpass) + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_bandwidth(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples per frame from an Opus packet. + * @param [in] data char*: Opus packet. + * This must contain at least one byte of + * data. + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples per frame. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_samples_per_frame(const unsigned char *data, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of channels from an Opus packet. + * @param [in] data char*: Opus packet + * @returns Number of channels + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_channels(const unsigned char *data) OPUS_ARG_NONNULL(1); + +/** Gets the number of frames in an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of frames + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_frames(const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @param [in] Fs opus_int32: Sampling rate in Hz. + * This must be a multiple of 400, or + * inaccurate results will be returned. + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_packet_get_nb_samples(const unsigned char packet[], opus_int32 len, opus_int32 Fs) OPUS_ARG_NONNULL(1); + +/** Gets the number of samples of an Opus packet. + * @param [in] dec OpusDecoder*: Decoder state + * @param [in] packet char*: Opus packet + * @param [in] len opus_int32: Length of packet + * @returns Number of samples + * @retval OPUS_BAD_ARG Insufficient data was passed to the function + * @retval OPUS_INVALID_PACKET The compressed data passed is corrupted or of an unsupported type + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_decoder_get_nb_samples(const OpusDecoder *dec, const unsigned char packet[], opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); +/**@}*/ + +/** @defgroup opus_repacketizer Repacketizer + * @{ + * + * The repacketizer can be used to merge multiple Opus packets into a single + * packet or alternatively to split Opus packets that have previously been + * merged. 
Splitting valid Opus packets is always guaranteed to succeed, + * whereas merging valid packets only succeeds if all frames have the same + * mode, bandwidth, and frame size, and when the total duration of the merged + * packet is no more than 120 ms. + * The repacketizer currently only operates on elementary Opus + * streams. It will not manipualte multistream packets successfully, except in + * the degenerate case where they consist of data from a single stream. + * + * The repacketizing process starts with creating a repacketizer state, either + * by calling opus_repacketizer_create() or by allocating the memory yourself, + * e.g., + * @code + * OpusRepacketizer *rp; + * rp = (OpusRepacketizer*)malloc(opus_repacketizer_get_size()); + * if (rp != NULL) + * opus_repacketizer_init(rp); + * @endcode + * + * Then the application should submit packets with opus_repacketizer_cat(), + * extract new packets with opus_repacketizer_out() or + * opus_repacketizer_out_range(), and then reset the state for the next set of + * input packets via opus_repacketizer_init(). + * + * For example, to split a sequence of packets into individual frames: + * @code + * unsigned char *data; + * int len; + * while (get_next_packet(&data, &len)) + * { + * unsigned char out[1276]; + * opus_int32 out_len; + * int nb_frames; + * int err; + * int i; + * err = opus_repacketizer_cat(rp, data, len); + * if (err != OPUS_OK) + * { + * release_packet(data); + * return err; + * } + * nb_frames = opus_repacketizer_get_nb_frames(rp); + * for (i = 0; i < nb_frames; i++) + * { + * out_len = opus_repacketizer_out_range(rp, i, i+1, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packet(data); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * } + * opus_repacketizer_init(rp); + * release_packet(data); + * } + * @endcode + * + * Alternatively, to combine a sequence of frames into packets that each + * contain up to TARGET_DURATION_MS milliseconds of data: + * @code + * // The maximum number of packets with duration TARGET_DURATION_MS occurs + * // when the frame size is 2.5 ms, for a total of (TARGET_DURATION_MS*2/5) + * // packets. + * unsigned char *data[(TARGET_DURATION_MS*2/5)+1]; + * opus_int32 len[(TARGET_DURATION_MS*2/5)+1]; + * int nb_packets; + * unsigned char out[1277*(TARGET_DURATION_MS*2/2)]; + * opus_int32 out_len; + * int prev_toc; + * nb_packets = 0; + * while (get_next_packet(data+nb_packets, len+nb_packets)) + * { + * int nb_frames; + * int err; + * nb_frames = opus_packet_get_nb_frames(data[nb_packets], len[nb_packets]); + * if (nb_frames < 1) + * { + * release_packets(data, nb_packets+1); + * return nb_frames; + * } + * nb_frames += opus_repacketizer_get_nb_frames(rp); + * // If adding the next packet would exceed our target, or it has an + * // incompatible TOC sequence, output the packets we already have before + * // submitting it. + * // N.B., The nb_packets > 0 check ensures we've submitted at least one + * // packet since the last call to opus_repacketizer_init(). Otherwise a + * // single packet longer than TARGET_DURATION_MS would cause us to try to + * // output an (invalid) empty packet. It also ensures that prev_toc has + * // been set to a valid value. Additionally, len[nb_packets] > 0 is + * // guaranteed by the call to opus_packet_get_nb_frames() above, so the + * // reference to data[nb_packets][0] should be valid. 
+ * if (nb_packets > 0 && ( + * ((prev_toc & 0xFC) != (data[nb_packets][0] & 0xFC)) || + * opus_packet_get_samples_per_frame(data[nb_packets], 48000)*nb_frames > + * TARGET_DURATION_MS*48)) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * if (out_len < 0) + * { + * release_packets(data, nb_packets+1); + * return (int)out_len; + * } + * output_next_packet(out, out_len); + * opus_repacketizer_init(rp); + * release_packets(data, nb_packets); + * data[0] = data[nb_packets]; + * len[0] = len[nb_packets]; + * nb_packets = 0; + * } + * err = opus_repacketizer_cat(rp, data[nb_packets], len[nb_packets]); + * if (err != OPUS_OK) + * { + * release_packets(data, nb_packets+1); + * return err; + * } + * prev_toc = data[nb_packets][0]; + * nb_packets++; + * } + * // Output the final, partial packet. + * if (nb_packets > 0) + * { + * out_len = opus_repacketizer_out(rp, out, sizeof(out)); + * release_packets(data, nb_packets); + * if (out_len < 0) + * return (int)out_len; + * output_next_packet(out, out_len); + * } + * @endcode + * + * An alternate way of merging packets is to simply call opus_repacketizer_cat() + * unconditionally until it fails. At that point, the merged packet can be + * obtained with opus_repacketizer_out() and the input packet for which + * opus_repacketizer_cat() needs to be re-added to a newly reinitialized + * repacketizer state. + */ + +typedef struct OpusRepacketizer OpusRepacketizer; + +/** Gets the size of an OpusRepacketizer structure. + * @returns The size in bytes. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_size(void); + +/** (Re)initializes a previously allocated repacketizer state. + * The state must be at least the size returned by opus_repacketizer_get_size(). + * This can be used for applications which use their own allocator instead of + * malloc(). + * It must also be called to reset the queue of packets waiting to be + * repacketized, which is necessary if the maximum packet duration of 120 ms + * is reached or if you wish to submit packets with a different Opus + * configuration (coding mode, audio bandwidth, frame size, or channel count). + * Failure to do so will prevent a new packet from being added with + * opus_repacketizer_cat(). + * @see opus_repacketizer_create + * @see opus_repacketizer_get_size + * @see opus_repacketizer_cat + * @param rp OpusRepacketizer*: The repacketizer state to + * (re)initialize. + * @returns A pointer to the same repacketizer state that was passed in. + */ +OPUS_EXPORT OpusRepacketizer *opus_repacketizer_init(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Allocates memory and initializes the new repacketizer with + * opus_repacketizer_init(). + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusRepacketizer *opus_repacketizer_create(void); + +/** Frees an OpusRepacketizer allocated by + * opus_repacketizer_create(). + * @param[in] rp OpusRepacketizer*: State to be freed. + */ +OPUS_EXPORT void opus_repacketizer_destroy(OpusRepacketizer *rp); + +/** Add a packet to the current repacketizer state. + * This packet must match the configuration of any packets already submitted + * for repacketization since the last call to opus_repacketizer_init(). + * This means that it must have the same coding mode, audio bandwidth, frame + * size, and channel count. + * This can be checked in advance by examining the top 6 bits of the first + * byte of the packet, and ensuring they match the top 6 bits of the first + * byte of any previously submitted packet. 
+ * The total duration of audio in the repacketizer state also must not exceed + * 120 ms, the maximum duration of a single packet, after adding this packet. + * + * The contents of the current repacketizer state can be extracted into new + * packets using opus_repacketizer_out() or opus_repacketizer_out_range(). + * + * In order to add a packet with a different configuration or to add more + * audio beyond 120 ms, you must clear the repacketizer state by calling + * opus_repacketizer_init(). + * If a packet is too large to add to the current repacketizer state, no part + * of it is added, even if it contains multiple frames, some of which might + * fit. + * If you wish to be able to add parts of such packets, you should first use + * another repacketizer to split the packet into pieces and add them + * individually. + * @see opus_repacketizer_out_range + * @see opus_repacketizer_out + * @see opus_repacketizer_init + * @param rp OpusRepacketizer*: The repacketizer state to which to + * add the packet. + * @param[in] data const unsigned char*: The packet data. + * The application must ensure + * this pointer remains valid + * until the next call to + * opus_repacketizer_init() or + * opus_repacketizer_destroy(). + * @param len opus_int32: The number of bytes in the packet data. + * @returns An error code indicating whether or not the operation succeeded. + * @retval #OPUS_OK The packet's contents have been added to the repacketizer + * state. + * @retval #OPUS_INVALID_PACKET The packet did not have a valid TOC sequence, + * the packet's TOC sequence was not compatible + * with previously submitted packets (because + * the coding mode, audio bandwidth, frame size, + * or channel count did not match), or adding + * this packet would increase the total amount of + * audio stored in the repacketizer state to more + * than 120 ms. + */ +OPUS_EXPORT int opus_repacketizer_cat(OpusRepacketizer *rp, const unsigned char *data, opus_int32 len) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2); + + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. + * @param begin int: The index of the first frame in the current + * repacketizer state to include in the output. + * @param end int: One past the index of the last frame in the + * current repacketizer state to include in the + * output. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1276 for a single frame, + * or for multiple frames, + * 1277*(end-begin). + * However, 1*(end-begin) plus + * the size of all packet data submitted to + * the repacketizer since the last call to + * opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BAD_ARG [begin,end) was an invalid range of + * frames (begin < 0, begin >= end, or end > + * opus_repacketizer_get_nb_frames()). + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. 
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out_range(OpusRepacketizer *rp, int begin, int end, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Return the total number of frames contained in packet data submitted to + * the repacketizer state so far via opus_repacketizer_cat() since the last + * call to opus_repacketizer_init() or opus_repacketizer_create(). + * This defines the valid range of packets that can be extracted with + * opus_repacketizer_out_range() or opus_repacketizer_out(). + * @param rp OpusRepacketizer*: The repacketizer state containing the + * frames. + * @returns The total number of frames contained in the packet data submitted + * to the repacketizer state. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_repacketizer_get_nb_frames(OpusRepacketizer *rp) OPUS_ARG_NONNULL(1); + +/** Construct a new packet from data previously submitted to the repacketizer + * state via opus_repacketizer_cat(). + * This is a convenience routine that returns all the data submitted so far + * in a single packet. + * It is equivalent to calling + * @code + * opus_repacketizer_out_range(rp, 0, opus_repacketizer_get_nb_frames(rp), + * data, maxlen) + * @endcode + * @param rp OpusRepacketizer*: The repacketizer state from which to + * construct the new packet. + * @param[out] data const unsigned char*: The buffer in which to + * store the output packet. + * @param maxlen opus_int32: The maximum number of bytes to store in + * the output buffer. In order to guarantee + * success, this should be at least + * 1277*opus_repacketizer_get_nb_frames(rp). + * However, + * 1*opus_repacketizer_get_nb_frames(rp) + * plus the size of all packet data + * submitted to the repacketizer since the + * last call to opus_repacketizer_init() or + * opus_repacketizer_create() is also + * sufficient, and possibly much smaller. + * @returns The total size of the output packet on success, or an error code + * on failure. + * @retval #OPUS_BUFFER_TOO_SMALL \a maxlen was insufficient to contain the + * complete output packet. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_repacketizer_out(OpusRepacketizer *rp, unsigned char *data, opus_int32 maxlen) OPUS_ARG_NONNULL(1); + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_defines.h b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_defines.h new file mode 100644 index 0000000..9fa3ccb --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_defines.h @@ -0,0 +1,655 @@ +/* Copyright (c) 2010-2011 Xiph.Org Foundation, Skype Limited + Written by Jean-Marc Valin and Koen Vos */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus_defines.h + * @brief Opus reference implementation constants + */ + +#ifndef OPUS_DEFINES_H +#define OPUS_DEFINES_H + +#include "opus_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @defgroup opus_errorcodes Error codes + * @{ + */ +/** No error @hideinitializer*/ +#define OPUS_OK 0 +/** One or more invalid/out of range arguments @hideinitializer*/ +#define OPUS_BAD_ARG -1 +/** The mode struct passed is invalid @hideinitializer*/ +#define OPUS_BUFFER_TOO_SMALL -2 +/** An internal error was detected @hideinitializer*/ +#define OPUS_INTERNAL_ERROR -3 +/** The compressed data passed is corrupted @hideinitializer*/ +#define OPUS_INVALID_PACKET -4 +/** Invalid/unsupported request number @hideinitializer*/ +#define OPUS_UNIMPLEMENTED -5 +/** An encoder or decoder structure is invalid or already freed @hideinitializer*/ +#define OPUS_INVALID_STATE -6 +/** Memory allocation has failed @hideinitializer*/ +#define OPUS_ALLOC_FAIL -7 +/**@}*/ + +/** @cond OPUS_INTERNAL_DOC */ +/**Export control for opus functions */ + +#ifndef OPUS_EXPORT +# if defined(WIN32) +# ifdef OPUS_BUILD +# define OPUS_EXPORT __declspec(dllexport) +# else +# define OPUS_EXPORT +# endif +# elif defined(__GNUC__) && defined(OPUS_BUILD) +# define OPUS_EXPORT __attribute__ ((visibility ("default"))) +# else +# define OPUS_EXPORT +# endif +#endif + +# if !defined(OPUS_GNUC_PREREQ) +# if defined(__GNUC__)&&defined(__GNUC_MINOR__) +# define OPUS_GNUC_PREREQ(_maj,_min) \ + ((__GNUC__<<16)+__GNUC_MINOR__>=((_maj)<<16)+(_min)) +# else +# define OPUS_GNUC_PREREQ(_maj,_min) 0 +# endif +# endif + +#if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) ) +# if OPUS_GNUC_PREREQ(3,0) +# define OPUS_RESTRICT __restrict__ +# elif (defined(_MSC_VER) && _MSC_VER >= 1400) +# define OPUS_RESTRICT __restrict +# else +# define OPUS_RESTRICT +# endif +#else +# define OPUS_RESTRICT restrict +#endif + +/**Warning attributes for opus functions + * NONNULL is not used in OPUS_BUILD to avoid the compiler optimizing out + * some paranoid null checks. */ +#if defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_WARN_UNUSED_RESULT __attribute__ ((__warn_unused_result__)) +#else +# define OPUS_WARN_UNUSED_RESULT +#endif +#if !defined(OPUS_BUILD) && defined(__GNUC__) && OPUS_GNUC_PREREQ(3, 4) +# define OPUS_ARG_NONNULL(_x) __attribute__ ((__nonnull__(_x))) +#else +# define OPUS_ARG_NONNULL(_x) +#endif + +/** These are the actual Encoder CTL ID numbers. + * They should not be used directly by applications. 
+ * In general, SETs should be even and GETs should be odd.*/ +#define OPUS_SET_APPLICATION_REQUEST 4000 +#define OPUS_GET_APPLICATION_REQUEST 4001 +#define OPUS_SET_BITRATE_REQUEST 4002 +#define OPUS_GET_BITRATE_REQUEST 4003 +#define OPUS_SET_MAX_BANDWIDTH_REQUEST 4004 +#define OPUS_GET_MAX_BANDWIDTH_REQUEST 4005 +#define OPUS_SET_VBR_REQUEST 4006 +#define OPUS_GET_VBR_REQUEST 4007 +#define OPUS_SET_BANDWIDTH_REQUEST 4008 +#define OPUS_GET_BANDWIDTH_REQUEST 4009 +#define OPUS_SET_COMPLEXITY_REQUEST 4010 +#define OPUS_GET_COMPLEXITY_REQUEST 4011 +#define OPUS_SET_INBAND_FEC_REQUEST 4012 +#define OPUS_GET_INBAND_FEC_REQUEST 4013 +#define OPUS_SET_PACKET_LOSS_PERC_REQUEST 4014 +#define OPUS_GET_PACKET_LOSS_PERC_REQUEST 4015 +#define OPUS_SET_DTX_REQUEST 4016 +#define OPUS_GET_DTX_REQUEST 4017 +#define OPUS_SET_VBR_CONSTRAINT_REQUEST 4020 +#define OPUS_GET_VBR_CONSTRAINT_REQUEST 4021 +#define OPUS_SET_FORCE_CHANNELS_REQUEST 4022 +#define OPUS_GET_FORCE_CHANNELS_REQUEST 4023 +#define OPUS_SET_SIGNAL_REQUEST 4024 +#define OPUS_GET_SIGNAL_REQUEST 4025 +#define OPUS_GET_LOOKAHEAD_REQUEST 4027 +/* #define OPUS_RESET_STATE 4028 */ +#define OPUS_GET_SAMPLE_RATE_REQUEST 4029 +#define OPUS_GET_FINAL_RANGE_REQUEST 4031 +#define OPUS_GET_PITCH_REQUEST 4033 +#define OPUS_SET_GAIN_REQUEST 4034 +#define OPUS_GET_GAIN_REQUEST 4045 /* Should have been 4035 */ +#define OPUS_SET_LSB_DEPTH_REQUEST 4036 +#define OPUS_GET_LSB_DEPTH_REQUEST 4037 + +#define OPUS_GET_LAST_PACKET_DURATION_REQUEST 4039 + +/* Don't use 4045, it's already taken by OPUS_GET_GAIN_REQUEST */ + +/* Macros to trigger compilation errors when the wrong types are provided to a CTL */ +#define __opus_check_int(x) (((void)((x) == (opus_int32)0)), (opus_int32)(x)) +#define __opus_check_int_ptr(ptr) ((ptr) + ((ptr) - (opus_int32*)(ptr))) +#define __opus_check_uint_ptr(ptr) ((ptr) + ((ptr) - (opus_uint32*)(ptr))) +/** @endcond */ + +/** @defgroup opus_ctlvalues Pre-defined values for CTL interface + * @see opus_genericctls, opus_encoderctls + * @{ + */ +/* Values for the various encoder CTLs */ +#define OPUS_AUTO -1000 /**opus_int32: Allowed values: 0-10, inclusive. + * + * @hideinitializer */ +#define OPUS_SET_COMPLEXITY(x) OPUS_SET_COMPLEXITY_REQUEST, __opus_check_int(x) +/** Gets the encoder's complexity configuration. + * @see OPUS_SET_COMPLEXITY + * @param[out] x opus_int32 *: Returns a value in the range 0-10, + * inclusive. + * @hideinitializer */ +#define OPUS_GET_COMPLEXITY(x) OPUS_GET_COMPLEXITY_REQUEST, __opus_check_int_ptr(x) + +/** Configures the bitrate in the encoder. + * Rates from 500 to 512000 bits per second are meaningful, as well as the + * special values #OPUS_AUTO and #OPUS_BITRATE_MAX. + * The value #OPUS_BITRATE_MAX can be used to cause the codec to use as much + * rate as it can, which is useful for controlling the rate by adjusting the + * output buffer size. + * @see OPUS_GET_BITRATE + * @param[in] x opus_int32: Bitrate in bits per second. The default + * is determined based on the number of + * channels and the input sampling rate. + * @hideinitializer */ +#define OPUS_SET_BITRATE(x) OPUS_SET_BITRATE_REQUEST, __opus_check_int(x) +/** Gets the encoder's bitrate configuration. + * @see OPUS_SET_BITRATE + * @param[out] x opus_int32 *: Returns the bitrate in bits per second. + * The default is determined based on the + * number of channels and the input + * sampling rate. 
+ * @hideinitializer */ +#define OPUS_GET_BITRATE(x) OPUS_GET_BITRATE_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables variable bitrate (VBR) in the encoder. + * The configured bitrate may not be met exactly because frames must + * be an integer number of bytes in length. + * @warning Only the MDCT mode of Opus can provide hard CBR behavior. + * @see OPUS_GET_VBR + * @see OPUS_SET_VBR_CONSTRAINT + * @param[in] x opus_int32: Allowed values: + *
+ *    0: Hard CBR. For LPC/hybrid modes at very low bit-rate, this can
+ *       cause noticeable quality degradation.
+ *    1: VBR (default). The exact type of VBR is controlled by
+ *       #OPUS_SET_VBR_CONSTRAINT.
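+ *
+ * As a brief sketch (assuming \c enc is an initialized encoder), hard CBR at
+ * 64 kb/s could be requested with:
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_BITRATE(64000));
+ * opus_encoder_ctl(enc, OPUS_SET_VBR(0));
+ * @endcode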
+ * @hideinitializer */ +#define OPUS_SET_VBR(x) OPUS_SET_VBR_REQUEST, __opus_check_int(x) +/** Determine if variable bitrate (VBR) is enabled in the encoder. + * @see OPUS_SET_VBR + * @see OPUS_GET_VBR_CONSTRAINT + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    0: Hard CBR.
+ *    1: VBR (default). The exact type of VBR may be retrieved via
+ *       #OPUS_GET_VBR_CONSTRAINT.
+ * @hideinitializer */ +#define OPUS_GET_VBR(x) OPUS_GET_VBR_REQUEST, __opus_check_int_ptr(x) + +/** Enables or disables constrained VBR in the encoder. + * This setting is ignored when the encoder is in CBR mode. + * @warning Only the MDCT mode of Opus currently heeds the constraint. + * Speech mode ignores it completely, hybrid mode may fail to obey it + * if the LPC layer uses more bitrate than the constraint would have + * permitted. + * @see OPUS_GET_VBR_CONSTRAINT + * @see OPUS_SET_VBR + * @param[in] x opus_int32: Allowed values: + *
+ *    0: Unconstrained VBR.
+ *    1: Constrained VBR (default). This creates a maximum of one
+ *       frame of buffering delay assuming a transport with a
+ *       serialization speed of the nominal bitrate.
+ * @hideinitializer */ +#define OPUS_SET_VBR_CONSTRAINT(x) OPUS_SET_VBR_CONSTRAINT_REQUEST, __opus_check_int(x) +/** Determine if constrained VBR is enabled in the encoder. + * @see OPUS_SET_VBR_CONSTRAINT + * @see OPUS_GET_VBR + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    0: Unconstrained VBR.
+ *    1: Constrained VBR (default).
+ * @hideinitializer */ +#define OPUS_GET_VBR_CONSTRAINT(x) OPUS_GET_VBR_CONSTRAINT_REQUEST, __opus_check_int_ptr(x) + +/** Configures mono/stereo forcing in the encoder. + * This can force the encoder to produce packets encoded as either mono or + * stereo, regardless of the format of the input audio. This is useful when + * the caller knows that the input signal is currently a mono source embedded + * in a stereo stream. + * @see OPUS_GET_FORCE_CHANNELS + * @param[in] x opus_int32: Allowed values: + *
+ *    #OPUS_AUTO: Not forced (default)
+ *    1:          Forced mono
+ *    2:          Forced stereo
+ * @hideinitializer */ +#define OPUS_SET_FORCE_CHANNELS(x) OPUS_SET_FORCE_CHANNELS_REQUEST, __opus_check_int(x) +/** Gets the encoder's forced channel configuration. + * @see OPUS_SET_FORCE_CHANNELS + * @param[out] x opus_int32 *: + *
+ *    #OPUS_AUTO: Not forced (default)
+ *    1:          Forced mono
+ *    2:          Forced stereo
+ * @hideinitializer */ +#define OPUS_GET_FORCE_CHANNELS(x) OPUS_GET_FORCE_CHANNELS_REQUEST, __opus_check_int_ptr(x) + +/** Configures the maximum bandpass that the encoder will select automatically. + * Applications should normally use this instead of #OPUS_SET_BANDWIDTH + * (leaving that set to the default, #OPUS_AUTO). This allows the + * application to set an upper bound based on the type of input it is + * providing, but still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_MAX_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ *    OPUS_BANDWIDTH_NARROWBAND:    4 kHz passband
+ *    OPUS_BANDWIDTH_MEDIUMBAND:    6 kHz passband
+ *    OPUS_BANDWIDTH_WIDEBAND:      8 kHz passband
+ *    OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *    OPUS_BANDWIDTH_FULLBAND:      20 kHz passband (default)
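+ *
+ * For instance, an application that only ever feeds the encoder 16 kHz
+ * speech might cap the bandpass at wideband (a sketch, assuming \c enc is an
+ * initialized encoder):
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_MAX_BANDWIDTH(OPUS_BANDWIDTH_WIDEBAND));
+ * @endcode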
+ * @hideinitializer */ +#define OPUS_SET_MAX_BANDWIDTH(x) OPUS_SET_MAX_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Gets the encoder's configured maximum allowed bandpass. + * @see OPUS_SET_MAX_BANDWIDTH + * @param[out] x opus_int32 *: Allowed values: + *
+ *    #OPUS_BANDWIDTH_NARROWBAND:    4 kHz passband
+ *    #OPUS_BANDWIDTH_MEDIUMBAND:    6 kHz passband
+ *    #OPUS_BANDWIDTH_WIDEBAND:      8 kHz passband
+ *    #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *    #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband (default)
+ * @hideinitializer */ +#define OPUS_GET_MAX_BANDWIDTH(x) OPUS_GET_MAX_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/** Sets the encoder's bandpass to a specific value. + * This prevents the encoder from automatically selecting the bandpass based + * on the available bitrate. If an application knows the bandpass of the input + * audio it is providing, it should normally use #OPUS_SET_MAX_BANDWIDTH + * instead, which still gives the encoder the freedom to reduce the bandpass + * when the bitrate becomes too low, for better overall quality. + * @see OPUS_GET_BANDWIDTH + * @param[in] x opus_int32: Allowed values: + *
+ *    #OPUS_AUTO:                    (default)
+ *    #OPUS_BANDWIDTH_NARROWBAND:    4 kHz passband
+ *    #OPUS_BANDWIDTH_MEDIUMBAND:    6 kHz passband
+ *    #OPUS_BANDWIDTH_WIDEBAND:      8 kHz passband
+ *    #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *    #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband
+ * @hideinitializer */ +#define OPUS_SET_BANDWIDTH(x) OPUS_SET_BANDWIDTH_REQUEST, __opus_check_int(x) + +/** Configures the type of signal being encoded. + * This is a hint which helps the encoder's mode selection. + * @see OPUS_GET_SIGNAL + * @param[in] x opus_int32: Allowed values: + *
+ *    #OPUS_AUTO:         (default)
+ *    #OPUS_SIGNAL_VOICE: Bias thresholds towards choosing LPC or Hybrid modes.
+ *    #OPUS_SIGNAL_MUSIC: Bias thresholds towards choosing MDCT modes.
+ * @hideinitializer */ +#define OPUS_SET_SIGNAL(x) OPUS_SET_SIGNAL_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal type. + * @see OPUS_SET_SIGNAL + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    #OPUS_AUTO:         (default)
+ *    #OPUS_SIGNAL_VOICE: Bias thresholds towards choosing LPC or Hybrid modes.
+ *    #OPUS_SIGNAL_MUSIC: Bias thresholds towards choosing MDCT modes.
+ * @hideinitializer */ +#define OPUS_GET_SIGNAL(x) OPUS_GET_SIGNAL_REQUEST, __opus_check_int_ptr(x) + + +/** Configures the encoder's intended application. + * The initial value is a mandatory argument to the encoder_create function. + * @see OPUS_GET_APPLICATION + * @param[in] x opus_int32: Returns one of the following values: + *
+ *    #OPUS_APPLICATION_VOIP:
+ *       Process signal for improved speech intelligibility.
+ *    #OPUS_APPLICATION_AUDIO:
+ *       Favor faithfulness to the original input.
+ *    #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *       Configure the minimum possible coding delay by disabling certain modes
+ *       of operation.
+ * @hideinitializer */ +#define OPUS_SET_APPLICATION(x) OPUS_SET_APPLICATION_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured application. + * @see OPUS_SET_APPLICATION + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    #OPUS_APPLICATION_VOIP:
+ *       Process signal for improved speech intelligibility.
+ *    #OPUS_APPLICATION_AUDIO:
+ *       Favor faithfulness to the original input.
+ *    #OPUS_APPLICATION_RESTRICTED_LOWDELAY:
+ *       Configure the minimum possible coding delay by disabling certain modes
+ *       of operation.
+ * @hideinitializer */ +#define OPUS_GET_APPLICATION(x) OPUS_GET_APPLICATION_REQUEST, __opus_check_int_ptr(x) + +/** Gets the sampling rate the encoder or decoder was initialized with. + * This simply returns the Fs value passed to opus_encoder_init() + * or opus_decoder_init(). + * @param[out] x opus_int32 *: Sampling rate of encoder or decoder. + * @hideinitializer + */ +#define OPUS_GET_SAMPLE_RATE(x) OPUS_GET_SAMPLE_RATE_REQUEST, __opus_check_int_ptr(x) + +/** Gets the total samples of delay added by the entire codec. + * This can be queried by the encoder and then the provided number of samples can be + * skipped on from the start of the decoder's output to provide time aligned input + * and output. From the perspective of a decoding application the real data begins this many + * samples late. + * + * The decoder contribution to this delay is identical for all decoders, but the + * encoder portion of the delay may vary from implementation to implementation, + * version to version, or even depend on the encoder's initial configuration. + * Applications needing delay compensation should call this CTL rather than + * hard-coding a value. + * @param[out] x opus_int32 *: Number of lookahead samples + * @hideinitializer */ +#define OPUS_GET_LOOKAHEAD(x) OPUS_GET_LOOKAHEAD_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of inband forward error correction (FEC). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_INBAND_FEC + * @param[in] x opus_int32: Allowed values: + *
+ *    0: Disable inband FEC (default).
+ *    1: Enable inband FEC.
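+ *
+ * As a sketch (assuming \c enc is an initialized encoder and the application
+ * expects roughly 10% packet loss), inband FEC is normally enabled together
+ * with #OPUS_SET_PACKET_LOSS_PERC:
+ * @code
+ * opus_encoder_ctl(enc, OPUS_SET_INBAND_FEC(1));
+ * opus_encoder_ctl(enc, OPUS_SET_PACKET_LOSS_PERC(10));
+ * @endcode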
+ * @hideinitializer */ +#define OPUS_SET_INBAND_FEC(x) OPUS_SET_INBAND_FEC_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of inband forward error correction. + * @see OPUS_SET_INBAND_FEC + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    0: Inband FEC disabled (default).
+ *    1: Inband FEC enabled.
+ * @hideinitializer */ +#define OPUS_GET_INBAND_FEC(x) OPUS_GET_INBAND_FEC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's expected packet loss percentage. + * Higher values with trigger progressively more loss resistant behavior in the encoder + * at the expense of quality at a given bitrate in the lossless case, but greater quality + * under loss. + * @see OPUS_GET_PACKET_LOSS_PERC + * @param[in] x opus_int32: Loss percentage in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_SET_PACKET_LOSS_PERC(x) OPUS_SET_PACKET_LOSS_PERC_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured packet loss percentage. + * @see OPUS_SET_PACKET_LOSS_PERC + * @param[out] x opus_int32 *: Returns the configured loss percentage + * in the range 0-100, inclusive (default: 0). + * @hideinitializer */ +#define OPUS_GET_PACKET_LOSS_PERC(x) OPUS_GET_PACKET_LOSS_PERC_REQUEST, __opus_check_int_ptr(x) + +/** Configures the encoder's use of discontinuous transmission (DTX). + * @note This is only applicable to the LPC layer + * @see OPUS_GET_DTX + * @param[in] x opus_int32: Allowed values: + *
+ *    0: Disable DTX (default).
+ *    1: Enable DTX.
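+ *
+ * A minimal sketch (assuming \c enc is an initialized encoder, \c pcm holds
+ * one 20 ms frame of 48 kHz 16-bit input, and \c packet is an output array
+ * large enough for one packet, e.g. 1276 bytes):
+ * @code
+ * opus_int32 len;
+ * opus_encoder_ctl(enc, OPUS_SET_DTX(1));
+ * len = opus_encode(enc, pcm, 960, packet, sizeof(packet));
+ * // With DTX active, a return value of 1 byte marks a frame that does not
+ * // need to be transmitted.
+ * @endcode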
+ * @hideinitializer */ +#define OPUS_SET_DTX(x) OPUS_SET_DTX_REQUEST, __opus_check_int(x) +/** Gets encoder's configured use of discontinuous transmission. + * @see OPUS_SET_DTX + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    0: DTX disabled (default).
+ *    1: DTX enabled.
+ * @hideinitializer */ +#define OPUS_GET_DTX(x) OPUS_GET_DTX_REQUEST, __opus_check_int_ptr(x) +/** Configures the depth of signal being encoded. + * This is a hint which helps the encoder identify silence and near-silence. + * @see OPUS_GET_LSB_DEPTH + * @param[in] x opus_int32: Input precision in bits, between 8 and 24 + * (default: 24). + * @hideinitializer */ +#define OPUS_SET_LSB_DEPTH(x) OPUS_SET_LSB_DEPTH_REQUEST, __opus_check_int(x) +/** Gets the encoder's configured signal depth. + * @see OPUS_SET_LSB_DEPTH + * @param[out] x opus_int32 *: Input precision in bits, between 8 and + * 24 (default: 24). + * @hideinitializer */ +#define OPUS_GET_LSB_DEPTH(x) OPUS_GET_LSB_DEPTH_REQUEST, __opus_check_int_ptr(x) + +/** Gets the duration (in samples) of the last packet successfully decoded or concealed. + * @param[out] x opus_int32 *: Number of samples (at current sampling rate). + * @hideinitializer */ +#define OPUS_GET_LAST_PACKET_DURATION(x) OPUS_GET_LAST_PACKET_DURATION_REQUEST, __opus_check_int_ptr(x) +/**@}*/ + +/** @defgroup opus_genericctls Generic CTLs + * + * These macros are used with the \c opus_decoder_ctl and + * \c opus_encoder_ctl calls to generate a particular + * request. + * + * When called on an \c OpusDecoder they apply to that + * particular decoder instance. When called on an + * \c OpusEncoder they apply to the corresponding setting + * on that encoder instance, if present. + * + * Some usage examples: + * + * @code + * int ret; + * opus_int32 pitch; + * ret = opus_decoder_ctl(dec_ctx, OPUS_GET_PITCH(&pitch)); + * if (ret == OPUS_OK) return ret; + * + * opus_encoder_ctl(enc_ctx, OPUS_RESET_STATE); + * opus_decoder_ctl(dec_ctx, OPUS_RESET_STATE); + * + * opus_int32 enc_bw, dec_bw; + * opus_encoder_ctl(enc_ctx, OPUS_GET_BANDWIDTH(&enc_bw)); + * opus_decoder_ctl(dec_ctx, OPUS_GET_BANDWIDTH(&dec_bw)); + * if (enc_bw != dec_bw) { + * printf("packet bandwidth mismatch!\n"); + * } + * @endcode + * + * @see opus_encoder, opus_decoder_ctl, opus_encoder_ctl, opus_decoderctls, opus_encoderctls + * @{ + */ + +/** Resets the codec state to be equivalent to a freshly initialized state. + * This should be called when switching streams in order to prevent + * the back to back decoding from giving different results from + * one at a time decoding. + * @hideinitializer */ +#define OPUS_RESET_STATE 4028 + +/** Gets the final state of the codec's entropy coder. + * This is used for testing purposes, + * The encoder and decoder state should be identical after coding a payload + * (assuming no data corruption or software bugs) + * + * @param[out] x opus_uint32 *: Entropy coder state + * + * @hideinitializer */ +#define OPUS_GET_FINAL_RANGE(x) OPUS_GET_FINAL_RANGE_REQUEST, __opus_check_uint_ptr(x) + +/** Gets the pitch of the last decoded frame, if available. + * This can be used for any post-processing algorithm requiring the use of pitch, + * e.g. time stretching/shortening. If the last frame was not voiced, or if the + * pitch was not coded in the frame, then zero is returned. + * + * This CTL is only implemented for decoder instances. + * + * @param[out] x opus_int32 *: pitch period at 48 kHz (or 0 if not available) + * + * @hideinitializer */ +#define OPUS_GET_PITCH(x) OPUS_GET_PITCH_REQUEST, __opus_check_int_ptr(x) + +/** Gets the encoder's configured bandpass or the decoder's last bandpass. + * @see OPUS_SET_BANDWIDTH + * @param[out] x opus_int32 *: Returns one of the following values: + *
+ *    #OPUS_AUTO:                    (default)
+ *    #OPUS_BANDWIDTH_NARROWBAND:    4 kHz passband
+ *    #OPUS_BANDWIDTH_MEDIUMBAND:    6 kHz passband
+ *    #OPUS_BANDWIDTH_WIDEBAND:      8 kHz passband
+ *    #OPUS_BANDWIDTH_SUPERWIDEBAND: 12 kHz passband
+ *    #OPUS_BANDWIDTH_FULLBAND:      20 kHz passband
+ * @hideinitializer */ +#define OPUS_GET_BANDWIDTH(x) OPUS_GET_BANDWIDTH_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_decoderctls Decoder related CTLs + * @see opus_genericctls, opus_encoderctls, opus_decoder + * @{ + */ + +/** Configures decoder gain adjustment. + * Scales the decoded output by a factor specified in Q8 dB units. + * This has a maximum range of -32768 to 32767 inclusive, and returns + * OPUS_BAD_ARG otherwise. The default is zero indicating no adjustment. + * This setting survives decoder reset. + * + * gain = pow(10, x/(20.0*256)) + * + * @param[in] x opus_int32: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_SET_GAIN(x) OPUS_SET_GAIN_REQUEST, __opus_check_int(x) +/** Gets the decoder's configured gain adjustment. @see OPUS_SET_GAIN + * + * @param[out] x opus_int32 *: Amount to scale PCM signal by in Q8 dB units. + * @hideinitializer */ +#define OPUS_GET_GAIN(x) OPUS_GET_GAIN_REQUEST, __opus_check_int_ptr(x) + +/**@}*/ + +/** @defgroup opus_libinfo Opus library information functions + * @{ + */ + +/** Converts an opus error code into a human readable string. + * + * @param[in] error int: Error number + * @returns Error string + */ +OPUS_EXPORT const char *opus_strerror(int error); + +/** Gets the libopus version string. + * + * @returns Version string + */ +OPUS_EXPORT const char *opus_get_version_string(void); +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_DEFINES_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_multistream.h b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_multistream.h new file mode 100644 index 0000000..ae59979 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_multistream.h @@ -0,0 +1,660 @@ +/* Copyright (c) 2011 Xiph.Org Foundation + Written by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/** + * @file opus_multistream.h + * @brief Opus reference implementation multistream API + */ + +#ifndef OPUS_MULTISTREAM_H +#define OPUS_MULTISTREAM_H + +#include "opus.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** @cond OPUS_INTERNAL_DOC */ + +/** Macros to trigger compilation errors when the wrong types are provided to a + * CTL. 
*/ +/**@{*/ +#define __opus_check_encstate_ptr(ptr) ((ptr) + ((ptr) - (OpusEncoder**)(ptr))) +#define __opus_check_decstate_ptr(ptr) ((ptr) + ((ptr) - (OpusDecoder**)(ptr))) +/**@}*/ + +/** These are the actual encoder and decoder CTL ID numbers. + * They should not be used directly by applications. + * In general, SETs should be even and GETs should be odd.*/ +/**@{*/ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST 5120 +#define OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST 5122 +/**@}*/ + +/** @endcond */ + +/** @defgroup opus_multistream_ctls Multistream specific encoder and decoder CTLs + * + * These are convenience macros that are specific to the + * opus_multistream_encoder_ctl() and opus_multistream_decoder_ctl() + * interface. + * The CTLs from @ref opus_genericctls, @ref opus_encoderctls, and + * @ref opus_decoderctls may be applied to a multistream encoder or decoder as + * well. + * In addition, you may retrieve the encoder or decoder state for an specific + * stream via #OPUS_MULTISTREAM_GET_ENCODER_STATE or + * #OPUS_MULTISTREAM_GET_DECODER_STATE and apply CTLs to it individually. + */ +/**@{*/ + +/** Gets the encoder state for an individual stream of a multistream encoder. + * @param[in] x opus_int32: The index of the stream whose encoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the encoder. + * @param[out] y OpusEncoder**: Returns a pointer to the given + * encoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. + * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_ENCODER_STATE(x,y) OPUS_MULTISTREAM_GET_ENCODER_STATE_REQUEST, __opus_check_int(x), __opus_check_encstate_ptr(y) + +/** Gets the decoder state for an individual stream of a multistream decoder. + * @param[in] x opus_int32: The index of the stream whose decoder you + * wish to retrieve. + * This must be non-negative and less than + * the streams parameter used + * to initialize the decoder. + * @param[out] y OpusDecoder**: Returns a pointer to the given + * decoder state. + * @retval OPUS_BAD_ARG The index of the requested stream was out of range. + * @hideinitializer + */ +#define OPUS_MULTISTREAM_GET_DECODER_STATE(x,y) OPUS_MULTISTREAM_GET_DECODER_STATE_REQUEST, __opus_check_int(x), __opus_check_decstate_ptr(y) + +/**@}*/ + +/** @defgroup opus_multistream Opus Multistream API + * @{ + * + * The multistream API allows individual Opus streams to be combined into a + * single packet, enabling support for up to 255 channels. Unlike an + * elementary Opus stream, the encoder and decoder must negotiate the channel + * configuration before the decoder can successfully interpret the data in the + * packets produced by the encoder. Some basic information, such as packet + * duration, can be computed without any special negotiation. + * + * The format for multistream Opus packets is defined in the + * Ogg + * encapsulation specification and is based on the self-delimited Opus + * framing described in Appendix B of RFC 6716. + * Normal Opus packets are just a degenerate case of multistream Opus packets, + * and can be encoded or decoded with the multistream API by setting + * streams to 1 when initializing the encoder or + * decoder. + * + * Multistream Opus streams can contain up to 255 elementary Opus streams. + * These may be either "uncoupled" or "coupled", indicating that the decoder + * is configured to decode them to either 1 or 2 channels, respectively. 
+ * The streams are ordered so that all coupled streams appear at the + * beginning. + * + * A mapping table defines which decoded channel i + * should be used for each input/output (I/O) channel j. This table is + * typically provided as an unsigned char array. + * Let i = mapping[j] be the index for I/O channel j. + * If i < 2*coupled_streams, then I/O channel j is + * encoded as the left channel of stream (i/2) if i + * is even, or as the right channel of stream (i/2) if + * i is odd. Otherwise, I/O channel j is encoded as + * mono in stream (i - coupled_streams), unless it has the special + * value 255, in which case it is omitted from the encoding entirely (the + * decoder will reproduce it as silence). Each value i must either + * be the special value 255 or be less than streams + coupled_streams. + * + * The output channels specified by the encoder + * should use the + * Vorbis + * channel ordering. A decoder may wish to apply an additional permutation + * to the mapping the encoder used to achieve a different output channel + * order (e.g. for outputing in WAV order). + * + * Each multistream packet contains an Opus packet for each stream, and all of + * the Opus packets in a single multistream packet must have the same + * duration. Therefore the duration of a multistream packet can be extracted + * from the TOC sequence of the first stream, which is located at the + * beginning of the packet, just like an elementary Opus stream: + * + * @code + * int nb_samples; + * int nb_frames; + * nb_frames = opus_packet_get_nb_frames(data, len); + * if (nb_frames < 1) + * return nb_frames; + * nb_samples = opus_packet_get_samples_per_frame(data, 48000) * nb_frames; + * @endcode + * + * The general encoding and decoding process proceeds exactly the same as in + * the normal @ref opus_encoder and @ref opus_decoder APIs. + * See their documentation for an overview of how to use the corresponding + * multistream functions. + */ + +/** Opus multistream encoder state. + * This contains the complete state of a multistream Opus encoder. + * It is position independent and can be freely copied. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_init + */ +typedef struct OpusMSEncoder OpusMSEncoder; + +/** Opus multistream decoder state. + * This contains the complete state of a multistream Opus decoder. + * It is position independent and can be freely copied. + * @see opus_multistream_decoder_create + * @see opus_multistream_decoder_init + */ +typedef struct OpusMSDecoder OpusMSDecoder; + +/**\name Multistream encoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSEncoder structure. + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_encoder_get_size( + int streams, + int coupled_streams +); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_surround_encoder_get_size( + int channels, + int mapping_family +); + + +/** Allocates and initializes a multistream encoder state. 
+ * Call opus_multistream_encoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ *            - #OPUS_APPLICATION_VOIP: Process signal for improved speech intelligibility.
+ *            - #OPUS_APPLICATION_AUDIO: Favor faithfulness to the original input.
+ *            - #OPUS_APPLICATION_RESTRICTED_LOWDELAY: Configure the minimum possible coding delay by disabling certain modes of operation.
+ * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_encoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(5); + +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSEncoder *opus_multistream_surround_encoder_create( + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application, + int *error +) OPUS_ARG_NONNULL(5); + +/** Initialize a previously allocated multistream encoder state. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. + * @see opus_multistream_encoder_create + * @see opus_multistream_encoder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate of the input signal (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels in the input signal. + * This must be at most 255. + * It may be greater than the number of + * coded channels (streams + + * coupled_streams). + * @param streams int: The total number of streams to encode from the + * input. + * This must be no more than the number of channels. + * @param coupled_streams int: Number of coupled (2 channel) streams + * to encode. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * encoded channels (streams + + * coupled_streams) must be no + * more than the number of input channels. + * @param[in] mapping const unsigned char[channels]: Mapping from + * encoded channels to input channels, as described in + * @ref opus_multistream. As an extra constraint, the + * multistream encoder does not allow encoding coupled + * streams for which one channel is unused since this + * is never a good idea. + * @param application int: The target encoder application. + * This must be one of the following: + *
+ *            - #OPUS_APPLICATION_VOIP: Process signal for improved speech intelligibility.
+ *            - #OPUS_APPLICATION_AUDIO: Favor faithfulness to the original input.
+ *            - #OPUS_APPLICATION_RESTRICTED_LOWDELAY: Configure the minimum possible coding delay by disabling certain modes of operation.
+ * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +OPUS_EXPORT int opus_multistream_surround_encoder_init( + OpusMSEncoder *st, + opus_int32 Fs, + int channels, + int mapping_family, + int *streams, + int *coupled_streams, + unsigned char *mapping, + int application +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +/** Encodes a multistream Opus frame. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const opus_int16*: The input signal as interleaved + * samples. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode( + OpusMSEncoder *st, + const opus_int16 *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Encodes a multistream Opus frame from floating point input. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param[in] pcm const float*: The input signal as interleaved + * samples with a normal range of + * +/-1.0. + * Samples with a range beyond +/-1.0 + * are supported but will be clipped by + * decoders using the integer API and + * should only be used if it is known + * that the far end supports extended + * dynamic range. + * This must contain + * frame_size*channels + * samples. + * @param frame_size int: Number of samples per channel in the input + * signal. + * This must be an Opus frame size for the + * encoder's sampling rate. + * For example, at 48 kHz the permitted values + * are 120, 240, 480, 960, 1920, and 2880. + * Passing in a duration of less than 10 ms + * (480 samples at 48 kHz) will prevent the + * encoder from using the LPC or hybrid modes. + * @param[out] data unsigned char*: Output payload. + * This must contain storage for at + * least \a max_data_bytes. + * @param [in] max_data_bytes opus_int32: Size of the allocated + * memory for the output + * payload. This may be + * used to impose an upper limit on + * the instant bitrate, but should + * not be used as the only bitrate + * control. Use #OPUS_SET_BITRATE to + * control the bitrate. + * @returns The length of the encoded packet (in bytes) on success or a + * negative error code (see @ref opus_errorcodes) on failure. 
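Tying the encoder calls above together, a minimal 5.1 encoding sketch could look as follows (an illustrative example, not part of the patch itself; the 48 kHz rate, 20 ms frame size, packet buffer size, and the {0, 4, 1, 2, 3, 5} mapping are assumed choices, and production code would check every return value):

    #include <opus/opus_multistream.h>   /* include path may differ by install layout */

    #define FRAME_SIZE 960               /* 20 ms at 48 kHz */
    #define CHANNELS   6
    #define MAX_PACKET 1500

    /* Hypothetical one-shot helper: encode FRAME_SIZE*CHANNELS interleaved samples. */
    static int encode_one_frame(const opus_int16 *pcm, unsigned char *packet)
    {
        /* Vorbis channel order: 2 coupled streams (front and rear pairs)
         * plus 2 mono streams (center and LFE). */
        static const unsigned char mapping[CHANNELS] = { 0, 4, 1, 2, 3, 5 };
        int err;
        OpusMSEncoder *enc = opus_multistream_encoder_create(
            48000, CHANNELS, /*streams=*/4, /*coupled_streams=*/2,
            mapping, OPUS_APPLICATION_AUDIO, &err);
        if (enc == NULL)
            return err;

        int len = opus_multistream_encode(enc, pcm, FRAME_SIZE, packet, MAX_PACKET);

        opus_multistream_encoder_destroy(enc);
        return len;   /* encoded packet length in bytes, or a negative error code */
    }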
+ */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_encode_float( + OpusMSEncoder *st, + const float *pcm, + int frame_size, + unsigned char *data, + opus_int32 max_data_bytes +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(2) OPUS_ARG_NONNULL(4); + +/** Frees an OpusMSEncoder allocated by + * opus_multistream_encoder_create(). + * @param st OpusMSEncoder*: Multistream encoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_encoder_destroy(OpusMSEncoder *st); + +/** Perform a CTL function on a multistream Opus encoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSEncoder*: Multistream encoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_encoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_encoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_encoder_ctl(OpusMSEncoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/**@}*/ + +/**\name Multistream decoder functions */ +/**@{*/ + +/** Gets the size of an OpusMSDecoder structure. + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @returns The size in bytes on success, or a negative error code + * (see @ref opus_errorcodes) on error. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT opus_int32 opus_multistream_decoder_get_size( + int streams, + int coupled_streams +); + +/** Allocates and initializes a multistream decoder state. + * Call opus_multistream_decoder_destroy() to release + * this object when finished. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @param[out] error int *: Returns #OPUS_OK on success, or an error + * code (see @ref opus_errorcodes) on + * failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT OpusMSDecoder *opus_multistream_decoder_create( + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping, + int *error +) OPUS_ARG_NONNULL(5); + +/** Intialize a previously allocated decoder state object. + * The memory pointed to by \a st must be at least the size returned by + * opus_multistream_encoder_get_size(). + * This is intended for applications which use their own allocator instead of + * malloc. + * To reset a previously initialized state, use the #OPUS_RESET_STATE CTL. 
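As a concrete sketch of the allocate-it-yourself path described above (an illustrative example, not part of the patch itself; malloc stands in for the application's own allocator, the 4-stream 5.1 layout matches the encoder sketch earlier, and error handling is abbreviated):

    #include <stdlib.h>
    #include <opus/opus_multistream.h>   /* include path may differ by install layout */

    #define CHANNELS  6
    #define MAX_FRAME 5760               /* 120 ms at 48 kHz, the largest packet duration */

    /* Hypothetical helper: size, allocate, and initialize a multistream decoder. */
    static OpusMSDecoder *make_decoder(void)
    {
        static const unsigned char mapping[CHANNELS] = { 0, 4, 1, 2, 3, 5 };
        opus_int32 size = opus_multistream_decoder_get_size(/*streams=*/4,
                                                            /*coupled_streams=*/2);
        if (size <= 0)
            return NULL;
        OpusMSDecoder *dec = (OpusMSDecoder *)malloc((size_t)size);
        if (dec != NULL &&
            opus_multistream_decoder_init(dec, 48000, CHANNELS, 4, 2, mapping) != OPUS_OK) {
            free(dec);
            dec = NULL;
        }
        return dec;
    }

Decoding then proceeds with opus_multistream_decode(dec, data, len, pcm, MAX_FRAME, 0); passing data == NULL with frame_size set to the missing duration performs packet-loss concealment, as the decode documentation below explains.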
+ * @see opus_multistream_decoder_create + * @see opus_multistream_deocder_get_size + * @param st OpusMSEncoder*: Multistream encoder state to initialize. + * @param Fs opus_int32: Sampling rate to decode at (in Hz). + * This must be one of 8000, 12000, 16000, + * 24000, or 48000. + * @param channels int: Number of channels to output. + * This must be at most 255. + * It may be different from the number of coded + * channels (streams + + * coupled_streams). + * @param streams int: The total number of streams coded in the + * input. + * This must be no more than 255. + * @param coupled_streams int: Number of streams to decode as coupled + * (2 channel) streams. + * This must be no larger than the total + * number of streams. + * Additionally, The total number of + * coded channels (streams + + * coupled_streams) must be no + * more than 255. + * @param[in] mapping const unsigned char[channels]: Mapping from + * coded channels to output channels, as described in + * @ref opus_multistream. + * @returns #OPUS_OK on success, or an error code (see @ref opus_errorcodes) + * on failure. + */ +OPUS_EXPORT int opus_multistream_decoder_init( + OpusMSDecoder *st, + opus_int32 Fs, + int channels, + int streams, + int coupled_streams, + const unsigned char *mapping +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(6); + +/** Decode a multistream Opus packet. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + opus_int16 *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Decode a multistream Opus packet with floating point output. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param[in] data const unsigned char*: Input payload. + * Use a NULL + * pointer to indicate packet + * loss. + * @param len opus_int32: Number of bytes in payload. + * @param[out] pcm opus_int16*: Output signal, with interleaved + * samples. + * This must contain room for + * frame_size*channels + * samples. + * @param frame_size int: The number of samples per channel of + * available space in \a pcm. + * If this is less than the maximum packet duration + * (120 ms; 5760 for 48kHz), this function will not be capable + * of decoding some packets. 
In the case of PLC (data==NULL) + * or FEC (decode_fec=1), then frame_size needs to be exactly + * the duration of audio that is missing, otherwise the + * decoder will not be in the optimal state to decode the + * next incoming packet. For the PLC and FEC cases, frame_size + * must be a multiple of 2.5 ms. + * @param decode_fec int: Flag (0 or 1) to request that any in-band + * forward error correction data be decoded. + * If no such data is available, the frame is + * decoded as if it were lost. + * @returns Number of samples decoded on success or a negative error code + * (see @ref opus_errorcodes) on failure. + */ +OPUS_EXPORT OPUS_WARN_UNUSED_RESULT int opus_multistream_decode_float( + OpusMSDecoder *st, + const unsigned char *data, + opus_int32 len, + float *pcm, + int frame_size, + int decode_fec +) OPUS_ARG_NONNULL(1) OPUS_ARG_NONNULL(4); + +/** Perform a CTL function on a multistream Opus decoder. + * + * Generally the request and subsequent arguments are generated by a + * convenience macro. + * @param st OpusMSDecoder*: Multistream decoder state. + * @param request This and all remaining parameters should be replaced by one + * of the convenience macros in @ref opus_genericctls, + * @ref opus_decoderctls, or @ref opus_multistream_ctls. + * @see opus_genericctls + * @see opus_decoderctls + * @see opus_multistream_ctls + */ +OPUS_EXPORT int opus_multistream_decoder_ctl(OpusMSDecoder *st, int request, ...) OPUS_ARG_NONNULL(1); + +/** Frees an OpusMSDecoder allocated by + * opus_multistream_decoder_create(). + * @param st OpusMSDecoder: Multistream decoder state to be freed. + */ +OPUS_EXPORT void opus_multistream_decoder_destroy(OpusMSDecoder *st); + +/**@}*/ + +/**@}*/ + +#ifdef __cplusplus +} +#endif + +#endif /* OPUS_MULTISTREAM_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_types.h b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_types.h new file mode 100644 index 0000000..b28e03a --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/include/opus/opus_types.h @@ -0,0 +1,159 @@ +/* (C) COPYRIGHT 1994-2002 Xiph.Org Foundation */ +/* Modified by Jean-Marc Valin */ +/* + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER + OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +/* opus_types.h based on ogg_types.h from libogg */ + +/** + @file opus_types.h + @brief Opus reference implementation types +*/ +#ifndef OPUS_TYPES_H +#define OPUS_TYPES_H + +/* Use the real stdint.h if it's there (taken from Paul Hsieh's pstdint.h) */ +#if (defined(__STDC__) && __STDC__ && __STDC_VERSION__ >= 199901L) || (defined(__GNUC__) && (defined(_STDINT_H) || defined(_STDINT_H_)) || defined (HAVE_STDINT_H)) +#include + + typedef int16_t opus_int16; + typedef uint16_t opus_uint16; + typedef int32_t opus_int32; + typedef uint32_t opus_uint32; +#elif defined(_WIN32) + +# if defined(__CYGWIN__) +# include <_G_config.h> + typedef _G_int32_t opus_int32; + typedef _G_uint32_t opus_uint32; + typedef _G_int16 opus_int16; + typedef _G_uint16 opus_uint16; +# elif defined(__MINGW32__) + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; +# elif defined(__MWERKS__) + typedef int opus_int32; + typedef unsigned int opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; +# else + /* MSVC/Borland */ + typedef __int32 opus_int32; + typedef unsigned __int32 opus_uint32; + typedef __int16 opus_int16; + typedef unsigned __int16 opus_uint16; +# endif + +#elif defined(__MACOS__) + +# include + typedef SInt16 opus_int16; + typedef UInt16 opus_uint16; + typedef SInt32 opus_int32; + typedef UInt32 opus_uint32; + +#elif (defined(__APPLE__) && defined(__MACH__)) /* MacOS X Framework build */ + +# include + typedef int16_t opus_int16; + typedef u_int16_t opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined(__BEOS__) + + /* Be */ +# include + typedef int16 opus_int16; + typedef u_int16 opus_uint16; + typedef int32_t opus_int32; + typedef u_int32_t opus_uint32; + +#elif defined (__EMX__) + + /* OS/2 GCC */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined (DJGPP) + + /* DJGPP */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(R5900) + + /* PS2 EE */ + typedef int opus_int32; + typedef unsigned opus_uint32; + typedef short opus_int16; + typedef unsigned short opus_uint16; + +#elif defined(__SYMBIAN32__) + + /* Symbian GCC */ + typedef signed short opus_int16; + typedef unsigned short opus_uint16; + typedef signed int opus_int32; + typedef unsigned int opus_uint32; + +#elif defined(CONFIG_TI_C54X) || defined (CONFIG_TI_C55X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef long opus_int32; + typedef unsigned long opus_uint32; + +#elif defined(CONFIG_TI_C6X) + + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#else + + /* Give up, take a reasonable guess */ + typedef short opus_int16; + typedef unsigned short opus_uint16; + typedef int opus_int32; + typedef unsigned int opus_uint32; + +#endif + +#define opus_int int /* used for counters etc; at least 16 bits */ +#define opus_int64 long long +#define opus_int8 signed char + +#define opus_uint unsigned int /* used for counters etc; at least 16 bits */ +#define opus_uint64 unsigned long long +#define opus_uint8 unsigned char + +#endif /* OPUS_TYPES_H */ diff --git a/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.a b/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.a new file mode 100644 index 0000000..0181dba Binary files 
/dev/null and b/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.a differ diff --git a/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.la b/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.la new file mode 100755 index 0000000..99e338b --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/lib/libopus.la @@ -0,0 +1,41 @@ +# libopus.la - a libtool library file +# Generated by libtool (GNU libtool) 2.4.2 +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='' + +# Names of this library. +library_names='' + +# The name of the static archive. +old_library='libopus.a' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags=' ' + +# Libraries that this one depends upon. +dependency_libs=' -L=/usr/lib' + +# Names of additional weak libraries provided by this library +weak_library_names='' + +# Version information for libopus. +current=4 +age=4 +revision=0 + +# Is this an already installed library? +installed=yes + +# Should we warn about portability when linking against -modules? +shouldnotlink=no + +# Files to dlopen/dlpreopen +dlopen='' +dlpreopen='' + +# Directory that this library needs to be installed in: +libdir='/Users/diegowaxemberg/Downloads/opus-ios-build-master/opus-ios-build/dist-armv7s/lib' diff --git a/Limelight-iOS/libs/opus/dist-armv7s/lib/pkgconfig/opus.pc b/Limelight-iOS/libs/opus/dist-armv7s/lib/pkgconfig/opus.pc new file mode 100644 index 0000000..6dadcd5 --- /dev/null +++ b/Limelight-iOS/libs/opus/dist-armv7s/lib/pkgconfig/opus.pc @@ -0,0 +1,16 @@ +# Opus codec reference implementation pkg-config file + +prefix=/Users/diegowaxemberg/Downloads/opus-ios-build-master/opus-ios-build/dist-armv7s +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include + +Name: Opus +Description: Opus IETF audio codec (floating-point build) +URL: http://opus-codec.org/ +Version: unknown +Requires: +Conflicts: +Libs: -L${libdir} -lopus +Libs.private: +Cflags: -I${includedir}/opus diff --git a/Limelight-iOS/main.m b/Limelight-iOS/main.m new file mode 100644 index 0000000..a5a78d2 --- /dev/null +++ b/Limelight-iOS/main.m @@ -0,0 +1,18 @@ +// +// main.m +// Limelight-iOS +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. +// + +#import + +#import "AppDelegate.h" + +int main(int argc, char * argv[]) +{ + @autoreleasepool { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class])); + } +} diff --git a/Limelight-iOSTests/Limelight-iOSTests-Info.plist b/Limelight-iOSTests/Limelight-iOSTests-Info.plist new file mode 100644 index 0000000..8514cb1 --- /dev/null +++ b/Limelight-iOSTests/Limelight-iOSTests-Info.plist @@ -0,0 +1,22 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIdentifier + com.limelight.${PRODUCT_NAME:rfc1034identifier} + CFBundleInfoDictionaryVersion + 6.0 + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? + CFBundleVersion + 1 + + diff --git a/Limelight-iOSTests/Limelight_iOSTests.m b/Limelight-iOSTests/Limelight_iOSTests.m new file mode 100644 index 0000000..cffdb0e --- /dev/null +++ b/Limelight-iOSTests/Limelight_iOSTests.m @@ -0,0 +1,34 @@ +// +// Limelight_iOSTests.m +// Limelight-iOSTests +// +// Created by Diego Waxemberg on 1/17/14. +// Copyright (c) 2014 Diego Waxemberg. All rights reserved. 
+// + +#import <XCTest/XCTest.h> + +@interface Limelight_iOSTests : XCTestCase + +@end + +@implementation Limelight_iOSTests + +- (void)setUp +{ + [super setUp]; + // Put setup code here. This method is called before the invocation of each test method in the class. +} + +- (void)tearDown +{ + // Put teardown code here. This method is called after the invocation of each test method in the class. + [super tearDown]; +} + +- (void)testExample +{ + XCTFail(@"No implementation for \"%s\"", __PRETTY_FUNCTION__); +} + +@end
diff --git a/Limelight-iOSTests/en.lproj/InfoPlist.strings b/Limelight-iOSTests/en.lproj/InfoPlist.strings new file mode 100644 index 0000000..477b28f --- /dev/null +++ b/Limelight-iOSTests/en.lproj/InfoPlist.strings @@ -0,0 +1,2 @@ +/* Localized versions of Info.plist keys */ +
diff --git a/MainFrame.storyboard b/MainFrame.storyboard new file mode 100644 index 0000000..e5e50c4 --- /dev/null +++ b/MainFrame.storyboard @@ -0,0 +1,101 @@ + [101 lines of Interface Builder storyboard XML; the markup was stripped in this capture and is not recoverable] \ No newline at end of file
diff --git a/notpadded.h264 b/notpadded.h264 new file mode 100644 index 0000000..8607070 Binary files /dev/null and b/notpadded.h264 differ