Mirror of https://github.com/moonlight-stream/moonlight-ios.git (synced 2026-02-16 02:20:53 +00:00)

Commit: fixed the ffmpeg build size
@@ -286,6 +286,15 @@ enum AVCodecID {
    AV_CODEC_ID_HNM4_VIDEO,
    AV_CODEC_ID_HEVC_DEPRECATED,
    AV_CODEC_ID_FIC,
    AV_CODEC_ID_ALIAS_PIX,
    AV_CODEC_ID_BRENDER_PIX_DEPRECATED,
    AV_CODEC_ID_PAF_VIDEO_DEPRECATED,
    AV_CODEC_ID_EXR_DEPRECATED,
    AV_CODEC_ID_VP7_DEPRECATED,
    AV_CODEC_ID_SANM_DEPRECATED,
    AV_CODEC_ID_SGIRLE_DEPRECATED,
    AV_CODEC_ID_MVC1_DEPRECATED,
    AV_CODEC_ID_MVC2_DEPRECATED,

    AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),
    AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
@@ -314,6 +323,7 @@ enum AVCodecID {
    AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'),
    AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'),
#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
    AV_CODEC_ID_VP7 = MKBETAG('V','P','7','0'),

    /* various PCM "codecs" */
    AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
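Note: the hunk above is where AV_CODEC_ID_HEVC gets its stable value (plus the AV_CODEC_ID_H265 alias), which is what lets a client such as Moonlight ask libavcodec for an H.265 decoder by ID. A minimal lookup sketch, assuming a build of these headers with an HEVC decoder compiled in (the helper name is illustrative, not part of the commit):

    #include <libavcodec/avcodec.h>

    /* Illustrative helper: look up the HEVC decoder by the codec ID above.
     * Returns NULL if no HEVC decoder was compiled into this FFmpeg build. */
    static AVCodec *find_hevc_decoder(void)
    {
        avcodec_register_all();   /* required before lookups in lavc 55/56 */
        return avcodec_find_decoder(AV_CODEC_ID_HEVC);
    }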
@@ -382,6 +392,8 @@ enum AVCodecID {
|
||||
AV_CODEC_ID_ADPCM_IMA_ISS,
|
||||
AV_CODEC_ID_ADPCM_G722,
|
||||
AV_CODEC_ID_ADPCM_IMA_APC,
|
||||
AV_CODEC_ID_ADPCM_VIMA_DEPRECATED,
|
||||
AV_CODEC_ID_ADPCM_VIMA = MKBETAG('V','I','M','A'),
|
||||
AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
|
||||
AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '),
|
||||
AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),
|
||||
@@ -471,6 +483,8 @@ enum AVCodecID {
|
||||
AV_CODEC_ID_COMFORT_NOISE,
|
||||
AV_CODEC_ID_TAK_DEPRECATED,
|
||||
AV_CODEC_ID_METASOUND,
|
||||
AV_CODEC_ID_PAF_AUDIO_DEPRECATED,
|
||||
AV_CODEC_ID_ON2AVC,
|
||||
AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
|
||||
AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
|
||||
AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
|
||||
@@ -479,6 +493,10 @@ enum AVCodecID {
|
||||
AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'),
|
||||
AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'),
|
||||
AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'),
|
||||
AV_CODEC_ID_DSD_LSBF = MKBETAG('D','S','D','L'),
|
||||
AV_CODEC_ID_DSD_MSBF = MKBETAG('D','S','D','M'),
|
||||
AV_CODEC_ID_DSD_LSBF_PLANAR = MKBETAG('D','S','D','1'),
|
||||
AV_CODEC_ID_DSD_MSBF_PLANAR = MKBETAG('D','S','D','8'),
|
||||
|
||||
/* subtitle codecs */
|
||||
AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
|
||||
@@ -515,6 +533,7 @@ enum AVCodecID {
|
||||
AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'),
|
||||
AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'),
|
||||
AV_CODEC_ID_TIMED_ID3 = MKBETAG('T','I','D','3'),
|
||||
AV_CODEC_ID_BIN_DATA = MKBETAG('D','A','T','A'),
|
||||
|
||||
|
||||
AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
|
||||
@@ -552,6 +571,13 @@ typedef struct AVCodecDescriptor {
|
||||
* Codec properties, a combination of AV_CODEC_PROP_* flags.
|
||||
*/
|
||||
int props;
|
||||
|
||||
/**
|
||||
* MIME type(s) associated with the codec.
|
||||
* May be NULL; if not, a NULL-terminated array of MIME types.
|
||||
* The first item is always non-NULL and is the preferred MIME type.
|
||||
*/
|
||||
const char *const *mime_types;
|
||||
} AVCodecDescriptor;
|
||||
|
||||
/**
|
||||
@@ -588,7 +614,7 @@ typedef struct AVCodecDescriptor {
 * Note: If the first 23 bits of the additional bytes are not 0, then damaged
 * MPEG bitstreams could cause overread and segfault.
 */
#define FF_INPUT_BUFFER_PADDING_SIZE 16
#define FF_INPUT_BUFFER_PADDING_SIZE 32

/**
 * @ingroup lavc_encoding
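Note: this hunk doubles FF_INPUT_BUFFER_PADDING_SIZE from 16 to 32 bytes, so any buffer handed to the decoder must now carry the larger zeroed tail. A minimal allocation sketch following the documented rule (the helper name is illustrative):

    #include <string.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/mem.h>

    /* Illustrative helper: allocate a decode input buffer with the
     * required zeroed padding after the payload. */
    static uint8_t *alloc_padded_input(size_t payload_size)
    {
        uint8_t *buf = av_malloc(payload_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!buf)
            return NULL;
        /* The padding must be zeroed to avoid overreads on damaged streams. */
        memset(buf + payload_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
        return buf;
    }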
@@ -625,56 +651,11 @@ enum AVDiscard{
|
||||
AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
|
||||
AVDISCARD_NONREF = 8, ///< discard all non reference
|
||||
AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
|
||||
AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
|
||||
AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
|
||||
AVDISCARD_ALL = 48, ///< discard all
|
||||
};
|
||||
|
||||
enum AVColorPrimaries{
|
||||
AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
|
||||
AVCOL_PRI_UNSPECIFIED = 2,
|
||||
AVCOL_PRI_BT470M = 4,
|
||||
AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
|
||||
AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
|
||||
AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
|
||||
AVCOL_PRI_FILM = 8,
|
||||
AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
|
||||
AVCOL_PRI_NB , ///< Not part of ABI
|
||||
};
|
||||
|
||||
enum AVColorTransferCharacteristic{
|
||||
AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
|
||||
AVCOL_TRC_UNSPECIFIED = 2,
|
||||
AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
|
||||
AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
|
||||
AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
|
||||
AVCOL_TRC_SMPTE240M = 7,
|
||||
AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
|
||||
AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
|
||||
AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt( 10 ) : 1 range)"
|
||||
AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
|
||||
AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
|
||||
AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
|
||||
AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system
|
||||
AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system
|
||||
AVCOL_TRC_NB , ///< Not part of ABI
|
||||
};
|
||||
|
||||
/**
|
||||
* X X 3 4 X X are luma samples,
|
||||
* 1 2 1-6 are possible chroma positions
|
||||
* X X 5 6 X 0 is undefined/unknown position
|
||||
*/
|
||||
enum AVChromaLocation{
|
||||
AVCHROMA_LOC_UNSPECIFIED = 0,
|
||||
AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
|
||||
AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
|
||||
AVCHROMA_LOC_TOPLEFT = 3, ///< DV
|
||||
AVCHROMA_LOC_TOP = 4,
|
||||
AVCHROMA_LOC_BOTTOMLEFT = 5,
|
||||
AVCHROMA_LOC_BOTTOM = 6,
|
||||
AVCHROMA_LOC_NB , ///< Not part of ABI
|
||||
};
|
||||
|
||||
enum AVAudioServiceType {
|
||||
AV_AUDIO_SERVICE_TYPE_MAIN = 0,
|
||||
AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
|
||||
@@ -719,14 +700,26 @@ typedef struct RcOverride{
|
||||
#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
|
||||
#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted
|
||||
#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
|
||||
#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
|
||||
#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>.
|
||||
#if FF_API_GMC
|
||||
/**
|
||||
* The parent program guarantees that the input for B-frames containing
|
||||
* streams is not written to for at least s->max_b_frames+1 frames, if
|
||||
* this is not set the input will be copied.
|
||||
* @deprecated use the "gmc" private option of the libxvid encoder
|
||||
*/
|
||||
#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
|
||||
#endif
|
||||
#if FF_API_MV0
|
||||
/**
|
||||
* @deprecated use the flag "mv0" in the "mpv_flags" private option of the
|
||||
* mpegvideo encoders
|
||||
*/
|
||||
#define CODEC_FLAG_MV0 0x0040
|
||||
#endif
|
||||
#if FF_API_INPUT_PRESERVED
|
||||
/**
|
||||
* @deprecated passing reference-counted frames to the encoders replaces this
|
||||
* flag
|
||||
*/
|
||||
#define CODEC_FLAG_INPUT_PRESERVED 0x0100
|
||||
#endif
|
||||
#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
|
||||
#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
|
||||
#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
|
||||
@@ -740,7 +733,13 @@ typedef struct RcOverride{
|
||||
#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
|
||||
#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random
|
||||
location instead of only at frame boundaries. */
|
||||
#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization.
|
||||
#if FF_API_NORMALIZE_AQP
|
||||
/**
|
||||
* @deprecated use the flag "naq" in the "mpv_flags" private option of the
|
||||
* mpegvideo encoders
|
||||
*/
|
||||
#define CODEC_FLAG_NORMALIZE_AQP 0x00020000
|
||||
#endif
|
||||
#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
|
||||
#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
|
||||
#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
|
||||
@@ -1002,6 +1001,21 @@ enum AVPacketSideDataType {
|
||||
*/
|
||||
AV_PKT_DATA_H263_MB_INFO,
|
||||
|
||||
/**
|
||||
* This side data should be associated with an audio stream and contains
|
||||
* ReplayGain information in form of the AVReplayGain struct.
|
||||
*/
|
||||
AV_PKT_DATA_REPLAYGAIN,
|
||||
|
||||
/**
|
||||
* This side data contains a 3x3 transformation matrix describing an affine
|
||||
* transformation that needs to be applied to the decoded video frames for
|
||||
* correct presentation.
|
||||
*
|
||||
* See libavutil/display.h for a detailed description of the data.
|
||||
*/
|
||||
AV_PKT_DATA_DISPLAYMATRIX,
|
||||
|
||||
/**
|
||||
* Recommmends skipping the specified number of samples
|
||||
* @code
|
||||
@@ -1067,6 +1081,12 @@ enum AVPacketSideDataType {
    AV_PKT_DATA_METADATA_UPDATE,
};

typedef struct AVPacketSideData {
    uint8_t *data;
    int size;
    enum AVPacketSideDataType type;
} AVPacketSideData;

/**
 * This structure stores compressed data. It is typically exported by demuxers
 * and then passed as input to decoders, or received as output from encoders and
@@ -1123,11 +1143,7 @@ typedef struct AVPacket {
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    struct {
        uint8_t *data;
        int size;
        enum AVPacketSideDataType type;
    } *side_data;
    AVPacketSideData *side_data;
    int side_data_elems;

    /**
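Note: the anonymous side_data struct inside AVPacket is replaced here by the new public AVPacketSideData type, so packet side data can be walked directly. A short sketch, assuming a packet obtained from av_read_frame() (the function name is illustrative):

    #include <libavcodec/avcodec.h>

    /* Illustrative helper: walk every side-data element attached to a packet. */
    static void dump_side_data(const AVPacket *pkt)
    {
        for (int i = 0; i < pkt->side_data_elems; i++) {
            const AVPacketSideData *sd = &pkt->side_data[i];
            av_log(NULL, AV_LOG_INFO, "side data type %d, %d bytes\n",
                   sd->type, sd->size);
        }
    }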
@@ -1205,7 +1221,13 @@ typedef struct AVCodecContext {
|
||||
|
||||
enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
|
||||
const struct AVCodec *codec;
|
||||
#if FF_API_CODEC_NAME
|
||||
/**
|
||||
* @deprecated this field is not used for anything in libavcodec
|
||||
*/
|
||||
attribute_deprecated
|
||||
char codec_name[32];
|
||||
#endif
|
||||
enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
|
||||
|
||||
/**
|
||||
@@ -1338,12 +1360,17 @@ typedef struct AVCodecContext {
|
||||
* encoded input.
|
||||
*
|
||||
* Audio:
|
||||
* For encoding, this is the number of "priming" samples added to the
|
||||
* beginning of the stream. The decoded output will be delayed by this
|
||||
* many samples relative to the input to the encoder. Note that this
|
||||
* field is purely informational and does not directly affect the pts
|
||||
* output by the encoder, which should always be based on the actual
|
||||
* presentation time, including any delay.
|
||||
* For encoding, this is the number of "priming" samples added by the
|
||||
* encoder to the beginning of the stream. The decoded output will be
|
||||
* delayed by this many samples relative to the input to the encoder (or
|
||||
* more, if the decoder adds its own padding).
|
||||
* The timestamps on the output packets are adjusted by the encoder so
|
||||
* that they always refer to the first sample of the data actually
|
||||
* contained in the packet, including any added padding.
|
||||
* E.g. if the timebase is 1/samplerate and the timestamp of the first
|
||||
* input sample is 0, the timestamp of the first output packet will be
|
||||
* -delay.
|
||||
*
|
||||
* For decoding, this is the number of samples the decoder needs to
|
||||
* output before the decoder's output is valid. When seeking, you should
|
||||
* start decoding this many samples prior to your desired seek point.
|
||||
@@ -2230,7 +2257,7 @@ typedef struct AVCodecContext {
|
||||
|
||||
/**
|
||||
* ratecontrol qmin qmax limiting method
|
||||
* 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax.
|
||||
* 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -2264,7 +2291,7 @@ typedef struct AVCodecContext {
|
||||
/**
|
||||
* maximum bitrate
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
* - decoding: Set by libavcodec.
|
||||
*/
|
||||
int rc_max_rate;
|
||||
|
||||
@@ -2325,14 +2352,14 @@ typedef struct AVCodecContext {
|
||||
int context_model;
|
||||
|
||||
/**
|
||||
* minimum Lagrange multipler
|
||||
* minimum Lagrange multiplier
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
int lmin;
|
||||
|
||||
/**
|
||||
* maximum Lagrange multipler
|
||||
* maximum Lagrange multiplier
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -2493,6 +2520,7 @@ typedef struct AVCodecContext {
|
||||
int error_concealment;
|
||||
#define FF_EC_GUESS_MVS 1
|
||||
#define FF_EC_DEBLOCK 2
|
||||
#define FF_EC_FAVOR_INTER 256
|
||||
|
||||
/**
|
||||
* debug
|
||||
@@ -2524,6 +2552,7 @@ typedef struct AVCodecContext {
|
||||
#endif
|
||||
#define FF_DEBUG_BUFFERS 0x00008000
|
||||
#define FF_DEBUG_THREADS 0x00010000
|
||||
#define FF_DEBUG_NOMC 0x01000000
|
||||
|
||||
#if FF_API_DEBUG_MV
|
||||
/**
|
||||
@@ -2556,8 +2585,9 @@ typedef struct AVCodecContext {
|
||||
#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
|
||||
#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
|
||||
|
||||
#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
|
||||
#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
|
||||
#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliancies as errors
|
||||
#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors
|
||||
#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error
|
||||
|
||||
|
||||
@@ -2621,18 +2651,23 @@ typedef struct AVCodecContext {
|
||||
#define FF_IDCT_SIMPLEMMX 3
|
||||
#define FF_IDCT_ARM 7
|
||||
#define FF_IDCT_ALTIVEC 8
|
||||
#if FF_API_ARCH_SH4
|
||||
#define FF_IDCT_SH4 9
|
||||
#endif
|
||||
#define FF_IDCT_SIMPLEARM 10
|
||||
#define FF_IDCT_IPP 13
|
||||
#define FF_IDCT_XVIDMMX 14
|
||||
#define FF_IDCT_SIMPLEARMV5TE 16
|
||||
#define FF_IDCT_SIMPLEARMV6 17
|
||||
#if FF_API_ARCH_SPARC
|
||||
#define FF_IDCT_SIMPLEVIS 18
|
||||
#endif
|
||||
#define FF_IDCT_FAAN 20
|
||||
#define FF_IDCT_SIMPLENEON 22
|
||||
#if FF_API_ARCH_ALPHA
|
||||
#define FF_IDCT_SIMPLEALPHA 23
|
||||
#endif
|
||||
#define FF_IDCT_SIMPLEAUTO 128
|
||||
|
||||
/**
|
||||
* bits per sample/pixel from the demuxer (needed for huffyuv).
|
||||
@@ -2743,7 +2778,7 @@ typedef struct AVCodecContext {
|
||||
#endif
|
||||
|
||||
/**
|
||||
* noise vs. sse weight for the nsse comparsion function
|
||||
* noise vs. sse weight for the nsse comparison function
|
||||
* - encoding: Set by user.
|
||||
* - decoding: unused
|
||||
*/
|
||||
@@ -2831,6 +2866,7 @@ typedef struct AVCodecContext {
|
||||
#define FF_PROFILE_HEVC_MAIN 1
|
||||
#define FF_PROFILE_HEVC_MAIN_10 2
|
||||
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
|
||||
#define FF_PROFILE_HEVC_REXT 4
|
||||
|
||||
/**
|
||||
* level
|
||||
@@ -2897,6 +2933,21 @@ typedef struct AVCodecContext {
|
||||
*/
|
||||
uint64_t vbv_delay;
|
||||
|
||||
/**
|
||||
* Encoding only. Allow encoders to output packets that do not contain any
|
||||
* encoded data, only side data.
|
||||
*
|
||||
* Some encoders need to output such packets, e.g. to update some stream
|
||||
* parameters at the end of encoding.
|
||||
*
|
||||
* All callers are strongly recommended to set this option to 1 and update
|
||||
* their code to deal with such packets, since this behaviour may become
|
||||
* always enabled in the future (then this option will be deprecated and
|
||||
* later removed). To avoid ABI issues when this happens, the callers should
|
||||
* use AVOptions to set this field.
|
||||
*/
|
||||
int side_data_only_packets;
|
||||
|
||||
/**
|
||||
* Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
|
||||
* Code outside libavcodec should access this field using:
|
||||
@@ -3164,8 +3215,20 @@ typedef struct AVHWAccel {
|
||||
*/
|
||||
int capabilities;
|
||||
|
||||
/*****************************************************************
|
||||
* No fields below this line are part of the public API. They
|
||||
* may not be used outside of libavcodec and can be changed and
|
||||
* removed at will.
|
||||
* New public fields should be added right above.
|
||||
*****************************************************************
|
||||
*/
|
||||
struct AVHWAccel *next;
|
||||
|
||||
/**
|
||||
* Allocate a custom buffer
|
||||
*/
|
||||
int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
|
||||
|
||||
/**
|
||||
* Called at the beginning of each frame or field picture.
|
||||
*
|
||||
@@ -3208,13 +3271,13 @@ typedef struct AVHWAccel {
|
||||
int (*end_frame)(AVCodecContext *avctx);
|
||||
|
||||
/**
|
||||
* Size of HW accelerator private data.
|
||||
* Size of per-frame hardware accelerator private data.
|
||||
*
|
||||
* Private data is allocated with av_mallocz() before
|
||||
* AVCodecContext.get_buffer() and deallocated after
|
||||
* AVCodecContext.release_buffer().
|
||||
*/
|
||||
int priv_data_size;
|
||||
int frame_priv_data_size;
|
||||
|
||||
/**
|
||||
* Called for every Macroblock in a slice.
|
||||
@@ -3226,6 +3289,29 @@ typedef struct AVHWAccel {
|
||||
* @param s the mpeg context
|
||||
*/
|
||||
void (*decode_mb)(struct MpegEncContext *s);
|
||||
|
||||
/**
|
||||
* Initialize the hwaccel private data.
|
||||
*
|
||||
* This will be called from ff_get_format(), after hwaccel and
|
||||
* hwaccel_context are set and the hwaccel private data in AVCodecInternal
|
||||
* is allocated.
|
||||
*/
|
||||
int (*init)(AVCodecContext *avctx);
|
||||
|
||||
/**
|
||||
* Uninitialize the hwaccel private data.
|
||||
*
|
||||
* This will be called from get_format() or avcodec_close(), after hwaccel
|
||||
* and hwaccel_context are already uninitialized.
|
||||
*/
|
||||
int (*uninit)(AVCodecContext *avctx);
|
||||
|
||||
/**
|
||||
* Size of the private data to allocate in
|
||||
* AVCodecInternal.hwaccel_priv_data.
|
||||
*/
|
||||
int priv_data_size;
|
||||
} AVHWAccel;
|
||||
|
||||
/**
|
||||
@@ -3350,9 +3436,8 @@ void avcodec_register(AVCodec *codec);
void avcodec_register_all(void);

/**
 * Allocate an AVCodecContext and set its fields to default values. The
 * resulting struct can be deallocated by calling avcodec_close() on it followed
 * by av_free().
 * Allocate an AVCodecContext and set its fields to default values. The
 * resulting struct should be freed with avcodec_free_context().
 *
 * @param codec if non-NULL, allocate private data and initialize defaults
 *              for the given codec. It is illegal to then call avcodec_open2()
@@ -3366,6 +3451,12 @@ void avcodec_register_all(void);
 */
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);

/**
 * Free the codec context and everything associated with it and write NULL to
 * the provided pointer.
 */
void avcodec_free_context(AVCodecContext **avctx);

/**
 * Set the fields of the given AVCodecContext to default values corresponding
 * to the given codec (defaults may be codec-dependent).
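Note: the documentation change above reflects the newly added avcodec_free_context(), which replaces the old avcodec_close() + av_free() teardown. A minimal open/close sketch under the new convention (the helper name and error handling are illustrative):

    #include <libavcodec/avcodec.h>

    /* Illustrative helper: allocate, open and (on failure) free an HEVC context. */
    static int open_hevc_context(AVCodecContext **out)
    {
        AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
        AVCodecContext *ctx;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;
        ctx = avcodec_alloc_context3(codec);
        if (!ctx)
            return AVERROR(ENOMEM);
        if (avcodec_open2(ctx, codec, NULL) < 0) {
            avcodec_free_context(&ctx);   /* frees everything and NULLs the pointer */
            return -1;
        }
        *out = ctx;
        return 0;
    }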
@@ -3583,14 +3674,14 @@ int av_dup_packet(AVPacket *pkt);
|
||||
*
|
||||
* @return 0 on success, negative AVERROR on fail
|
||||
*/
|
||||
int av_copy_packet(AVPacket *dst, AVPacket *src);
|
||||
int av_copy_packet(AVPacket *dst, const AVPacket *src);
|
||||
|
||||
/**
|
||||
* Copy packet side data
|
||||
*
|
||||
* @return 0 on success, negative AVERROR on fail
|
||||
*/
|
||||
int av_copy_packet_side_data(AVPacket *dst, AVPacket *src);
|
||||
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
|
||||
|
||||
/**
|
||||
* Free a packet.
|
||||
@@ -3679,7 +3770,7 @@ void av_packet_free_side_data(AVPacket *pkt);
|
||||
*
|
||||
* @return 0 on success, a negative AVERROR on error.
|
||||
*/
|
||||
int av_packet_ref(AVPacket *dst, AVPacket *src);
|
||||
int av_packet_ref(AVPacket *dst, const AVPacket *src);
|
||||
|
||||
/**
|
||||
* Wipe the packet.
|
||||
@@ -3715,6 +3806,19 @@ void av_packet_move_ref(AVPacket *dst, AVPacket *src);
 */
int av_packet_copy_props(AVPacket *dst, const AVPacket *src);

/**
 * Convert valid timing fields (timestamps / durations) in a packet from one
 * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
 * ignored.
 *
 * @param pkt packet on which the conversion will be performed
 * @param tb_src source timebase, in which the timing fields in pkt are
 *               expressed
 * @param tb_dst destination timebase, to which the timing fields will be
 *               converted
 */
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);

/**
 * @}
 */
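Note: av_packet_rescale_ts() is new in this header revision; it replaces hand-written av_rescale_q() calls on pts/dts/duration when moving packets between timebases. A typical remuxing sketch (the wrapper is illustrative; the stream pointers are assumed to come from the caller's demuxer and muxer contexts):

    #include <libavformat/avformat.h>

    /* Illustrative helper: convert a demuxed packet's timing into the
     * muxer stream's timebase before av_interleaved_write_frame(). */
    static void retime_packet(AVPacket *pkt,
                              const AVStream *in_st, const AVStream *out_st)
    {
        av_packet_rescale_ts(pkt, in_st->time_base, out_st->time_base);
        /* AV_NOPTS_VALUE timestamps are left untouched by the call above. */
    }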
@@ -3983,8 +4087,8 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
|
||||
* marked with CODEC_CAP_DELAY, then no subtitles will be returned.
|
||||
*
|
||||
* @param avctx the codec context
|
||||
* @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be
|
||||
freed with avsubtitle_free if *got_sub_ptr is set.
|
||||
* @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,
|
||||
* must be freed with avsubtitle_free if *got_sub_ptr is set.
|
||||
* @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
|
||||
* @param[in] avpkt The input AVPacket containing the input buffer.
|
||||
*/
|
||||
@@ -4647,30 +4751,8 @@ void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int
|
||||
*/
|
||||
unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
|
||||
|
||||
#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
|
||||
#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
|
||||
#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
|
||||
#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
|
||||
#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
|
||||
#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
|
||||
|
||||
/**
|
||||
* Compute what kind of losses will occur when converting from one specific
|
||||
* pixel format to another.
|
||||
* When converting from one pixel format to another, information loss may occur.
|
||||
* For example, when converting from RGB24 to GRAY, the color information will
|
||||
* be lost. Similarly, other losses occur when converting from some formats to
|
||||
* other formats. These losses can involve loss of chroma, but also loss of
|
||||
* resolution, loss of color depth, loss due to the color space conversion, loss
|
||||
* of the alpha bits or loss due to color quantization.
|
||||
* avcodec_get_fix_fmt_loss() informs you about the various types of losses
|
||||
* which will occur when converting from one pixel format to another.
|
||||
*
|
||||
* @param[in] dst_pix_fmt destination pixel format
|
||||
* @param[in] src_pix_fmt source pixel format
|
||||
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
|
||||
* @return Combination of flags informing you what kind of losses will occur
|
||||
* (maximum loss for an invalid dst_pix_fmt).
|
||||
* @deprecated see av_get_pix_fmt_loss()
|
||||
*/
|
||||
int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
|
||||
int has_alpha);
|
||||
@@ -4697,34 +4779,7 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *p
|
||||
int has_alpha, int *loss_ptr);
|
||||
|
||||
/**
|
||||
* Find the best pixel format to convert to given a certain source pixel
|
||||
* format and a selection of two destination pixel formats. When converting from
|
||||
* one pixel format to another, information loss may occur. For example, when converting
|
||||
* from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when
|
||||
* converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() selects which of
|
||||
* the given pixel formats should be used to suffer the least amount of loss.
|
||||
*
|
||||
* If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be
|
||||
* returned.
|
||||
*
|
||||
* @code
|
||||
* src_pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
* dst_pix_fmt1= AV_PIX_FMT_RGB24;
|
||||
* dst_pix_fmt2= AV_PIX_FMT_GRAY8;
|
||||
* dst_pix_fmt3= AV_PIX_FMT_RGB8;
|
||||
* loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored.
|
||||
* dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss);
|
||||
* dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss);
|
||||
* @endcode
|
||||
*
|
||||
* @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from
|
||||
* @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from
|
||||
* @param[in] src_pix_fmt Source pixel format
|
||||
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
|
||||
* @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e.
|
||||
* NULL or value of zero means we care about all losses. Out: the loss
|
||||
* that occurs when converting from src to selected dst pixel format.
|
||||
* @return The best pixel format to convert to or -1 if none was found.
|
||||
* @deprecated see av_find_best_pix_fmt_of_2()
|
||||
*/
|
||||
enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
|
||||
enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
|
||||
@@ -4918,7 +4973,7 @@ AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
|
||||
* @return >= 0 in case of success, or a negative error code in case of failure
|
||||
*
|
||||
* If the return value is positive, an output buffer is allocated and
|
||||
* is availble in *poutbuf, and is distinct from the input buffer.
|
||||
* is available in *poutbuf, and is distinct from the input buffer.
|
||||
*
|
||||
* If the return value is 0, the output buffer is not allocated and
|
||||
* should be considered identical to the input buffer, or in case
|
||||
|
||||
libs/FFmpeg/include/libavcodec/dv_profile.h (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_DV_PROFILE_H
|
||||
#define AVCODEC_DV_PROFILE_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "libavutil/pixfmt.h"
|
||||
#include "libavutil/rational.h"
|
||||
#include "avcodec.h"
|
||||
|
||||
/* minimum number of bytes to read from a DV stream in order to
|
||||
determine the profile */
|
||||
#define DV_PROFILE_BYTES (6*80) /* 6 DIF blocks */
|
||||
|
||||
/*
|
||||
* AVDVProfile is used to express the differences between various
|
||||
* DV flavors. For now it's primarily used for differentiating
|
||||
* 525/60 and 625/50, but the plans are to use it for various
|
||||
* DV specs as well (e.g. SMPTE314M vs. IEC 61834).
|
||||
*/
|
||||
typedef struct AVDVProfile {
|
||||
int dsf; /* value of the dsf in the DV header */
|
||||
int video_stype; /* stype for VAUX source pack */
|
||||
int frame_size; /* total size of one frame in bytes */
|
||||
int difseg_size; /* number of DIF segments per DIF channel */
|
||||
int n_difchan; /* number of DIF channels per frame */
|
||||
AVRational time_base; /* 1/framerate */
|
||||
int ltc_divisor; /* FPS from the LTS standpoint */
|
||||
int height; /* picture height in pixels */
|
||||
int width; /* picture width in pixels */
|
||||
AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */
|
||||
enum AVPixelFormat pix_fmt; /* picture pixel format */
|
||||
int bpm; /* blocks per macroblock */
|
||||
const uint8_t *block_sizes; /* AC block sizes, in bits */
|
||||
int audio_stride; /* size of audio_shuffle table */
|
||||
int audio_min_samples[3]; /* min amount of audio samples */
|
||||
/* for 48kHz, 44.1kHz and 32kHz */
|
||||
int audio_samples_dist[5]; /* how many samples are supposed to be */
|
||||
/* in each frame in a 5 frames window */
|
||||
const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
|
||||
} AVDVProfile;
|
||||
|
||||
const AVDVProfile* avpriv_dv_frame_profile2(AVCodecContext* codec, const AVDVProfile *sys,
|
||||
const uint8_t* frame, unsigned buf_size);
|
||||
#if LIBAVCODEC_VERSION_MAJOR < 56
|
||||
const AVDVProfile *avpriv_dv_frame_profile(const AVDVProfile *sys,
|
||||
const uint8_t* frame, unsigned buf_size);
|
||||
const AVDVProfile *avpriv_dv_codec_profile(AVCodecContext* codec);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Get a DV profile for the provided compressed frame.
|
||||
*
|
||||
* @param sys the profile used for the previous frame, may be NULL
|
||||
* @param frame the compressed data buffer
|
||||
* @param buf_size size of the buffer in bytes
|
||||
* @return the DV profile for the supplied data or NULL on failure
|
||||
*/
|
||||
const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys,
|
||||
const uint8_t *frame, unsigned buf_size);
|
||||
|
||||
/**
|
||||
* Get a DV profile for the provided stream parameters.
|
||||
*/
|
||||
const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt);
|
||||
|
||||
#endif /* AVCODEC_DV_PROFILE_H */
|
||||
@@ -49,6 +49,7 @@
|
||||
*/
|
||||
|
||||
#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards
|
||||
#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface
|
||||
|
||||
/**
|
||||
* This structure is used to provides the necessary configurations and data
|
||||
|
||||
@@ -19,8 +19,6 @@
|
||||
#ifndef AVCODEC_OLD_CODEC_IDS_H
|
||||
#define AVCODEC_OLD_CODEC_IDS_H
|
||||
|
||||
#include "libavutil/common.h"
|
||||
|
||||
/*
|
||||
* This header exists to prevent new codec IDs from being accidentally added to
|
||||
* the deprecated list.
|
||||
|
||||
@@ -29,6 +29,8 @@
|
||||
* Public libavcodec VDA header.
|
||||
*/
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
|
||||
@@ -118,26 +120,17 @@ struct vda_context {
|
||||
OSType cv_pix_fmt_type;
|
||||
|
||||
/**
|
||||
* The current bitstream buffer.
|
||||
*
|
||||
* - encoding: unused
|
||||
* - decoding: Set/Unset by libavcodec.
|
||||
* unused
|
||||
*/
|
||||
uint8_t *priv_bitstream;
|
||||
|
||||
/**
|
||||
* The current size of the bitstream.
|
||||
*
|
||||
* - encoding: unused
|
||||
* - decoding: Set/Unset by libavcodec.
|
||||
* unused
|
||||
*/
|
||||
int priv_bitstream_size;
|
||||
|
||||
/**
|
||||
* The reference size used for fast reallocation.
|
||||
*
|
||||
* - encoding: unused
|
||||
* - decoding: Set/Unset by libavcodec.
|
||||
* unused
|
||||
*/
|
||||
int priv_allocated_size;
|
||||
|
||||
@@ -161,6 +154,58 @@ int ff_vda_create_decoder(struct vda_context *vda_ctx,
|
||||
/** Destroy the video decoder. */
|
||||
int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
|
||||
|
||||
/**
|
||||
* This struct holds all the information that needs to be passed
|
||||
* between the caller and libavcodec for initializing VDA decoding.
|
||||
* Its size is not a part of the public ABI, it must be allocated with
|
||||
* av_vda_alloc_context() and freed with av_free().
|
||||
*/
|
||||
typedef struct AVVDAContext {
|
||||
/**
|
||||
* VDA decoder object. Created and freed by the caller.
|
||||
*/
|
||||
VDADecoder decoder;
|
||||
|
||||
/**
|
||||
* The output callback that must be passed to VDADecoderCreate.
|
||||
* Set by av_vda_alloc_context().
|
||||
*/
|
||||
VDADecoderOutputCallback output_callback;
|
||||
} AVVDAContext;
|
||||
|
||||
/**
 * Allocate and initialize a VDA context.
 *
 * This function should be called from the get_format() callback when the caller
 * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder
 * object (using the output callback provided by libavcodec) that will be used
 * for VDA-accelerated decoding.
 *
 * When decoding with VDA is finished, the caller must destroy the decoder
 * object and free the VDA context using av_free().
 *
 * @return the newly allocated context or NULL on failure
 */
AVVDAContext *av_vda_alloc_context(void);

/**
 * This is a convenience function that creates and sets up the VDA context using
 * an internal implementation.
 *
 * @param avctx the corresponding codec context
 *
 * @return >= 0 on success, a negative AVERROR code on failure
 */
int av_vda_default_init(AVCodecContext *avctx);

/**
 * This function must be called to free the VDA context initialized with
 * av_vda_default_init().
 *
 * @param avctx the corresponding codec context
 */
void av_vda_default_free(AVCodecContext *avctx);

/**
 * @}
 */
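Note: the AVVDAContext path documented above is the hook an iOS/OS X player of this era can use for hardware H.264 decoding: return AV_PIX_FMT_VDA from get_format() and let libavcodec set the context up via av_vda_default_init(). A hedged sketch of such a callback (the function name and fallback policy are illustrative, not part of the commit):

    #include <libavcodec/vda.h>

    /* Illustrative get_format() callback: prefer VDA hardware decoding when offered. */
    static enum AVPixelFormat pick_vda_format(AVCodecContext *avctx,
                                              const enum AVPixelFormat *fmts)
    {
        for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
            if (*p == AV_PIX_FMT_VDA && av_vda_default_init(avctx) >= 0)
                return AV_PIX_FMT_VDA;   /* libavcodec created the VDA decoder */
        }
        return fmts[0];   /* simplistic fallback; a real player would pick a software format */
    }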
@@ -29,8 +29,8 @@
#include "libavutil/version.h"

#define LIBAVCODEC_VERSION_MAJOR 55
#define LIBAVCODEC_VERSION_MINOR 52
#define LIBAVCODEC_VERSION_MICRO 102
#define LIBAVCODEC_VERSION_MINOR 69
#define LIBAVCODEC_VERSION_MICRO 100

#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                              LIBAVCODEC_VERSION_MINOR, \
@@ -141,5 +141,26 @@
|
||||
#ifndef FF_API_EMU_EDGE
|
||||
#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 56)
|
||||
#endif
|
||||
#ifndef FF_API_ARCH_SH4
|
||||
#define FF_API_ARCH_SH4 (LIBAVCODEC_VERSION_MAJOR < 56)
|
||||
#endif
|
||||
#ifndef FF_API_ARCH_SPARC
|
||||
#define FF_API_ARCH_SPARC (LIBAVCODEC_VERSION_MAJOR < 56)
|
||||
#endif
|
||||
#ifndef FF_API_INPUT_PRESERVED
|
||||
#define FF_API_INPUT_PRESERVED (LIBAVCODEC_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
#ifndef FF_API_NORMALIZE_AQP
|
||||
#define FF_API_NORMALIZE_AQP (LIBAVCODEC_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
#ifndef FF_API_GMC
|
||||
#define FF_API_GMC (LIBAVCODEC_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
#ifndef FF_API_MV0
|
||||
#define FF_API_MV0 (LIBAVCODEC_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
#ifndef FF_API_CODEC_NAME
|
||||
#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
|
||||
#endif /* AVCODEC_VERSION_H */
|
||||
|
||||
@@ -43,6 +43,9 @@
|
||||
* @}
|
||||
*/
|
||||
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavformat/avformat.h"
|
||||
|
||||
/**
|
||||
@@ -66,6 +69,42 @@ const char *avdevice_license(void);
|
||||
*/
|
||||
void avdevice_register_all(void);
|
||||
|
||||
/**
|
||||
* Audio input devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered input audio/video device,
|
||||
* if d is non-NULL, returns the next registered input audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
|
||||
|
||||
/**
|
||||
* Video input devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered input audio/video device,
|
||||
* if d is non-NULL, returns the next registered input audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVInputFormat *av_input_video_device_next(AVInputFormat *d);
|
||||
|
||||
/**
|
||||
* Audio output devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered output audio/video device,
|
||||
* if d is non-NULL, returns the next registered output audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
|
||||
|
||||
/**
|
||||
* Video output devices iterator.
|
||||
*
|
||||
* If d is NULL, returns the first registered output audio/video device,
|
||||
* if d is non-NULL, returns the next registered output audio/video device after d
|
||||
* or NULL if d is the last one.
|
||||
*/
|
||||
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
|
||||
|
||||
typedef struct AVDeviceRect {
|
||||
int x; /**< x coordinate of top left corner */
|
||||
int y; /**< y coordinate of top left corner */
|
||||
@@ -96,12 +135,60 @@ enum AVAppToDevMessageType {
|
||||
/**
|
||||
* Repaint request message.
|
||||
*
|
||||
* Message is sent to the device when window have to be rapainted.
|
||||
* Message is sent to the device when window has to be repainted.
|
||||
*
|
||||
* data: AVDeviceRect: area required to be repainted.
|
||||
* NULL: whole area is required to be repainted.
|
||||
*/
|
||||
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A')
|
||||
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
|
||||
|
||||
/**
|
||||
* Request pause/play.
|
||||
*
|
||||
* Application requests pause/unpause playback.
|
||||
* Mostly usable with devices that have internal buffer.
|
||||
* By default devices are not paused.
|
||||
*
|
||||
* data: NULL
|
||||
*/
|
||||
AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
|
||||
AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
|
||||
AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
|
||||
|
||||
/**
|
||||
* Volume control message.
|
||||
*
|
||||
* Set volume level. It may be device-dependent if volume
|
||||
* is changed per stream or system wide. Per stream volume
|
||||
* change is expected when possible.
|
||||
*
|
||||
* data: double: new volume with range of 0.0 - 1.0.
|
||||
*/
|
||||
AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
|
||||
|
||||
/**
|
||||
* Mute control messages.
|
||||
*
|
||||
* Change mute state. It may be device-dependent if mute status
|
||||
* is changed per stream or system wide. Per stream mute status
|
||||
* change is expected when possible.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
|
||||
AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
|
||||
AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
|
||||
|
||||
/**
|
||||
* Get volume/mute messages.
|
||||
*
|
||||
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
|
||||
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
|
||||
AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -144,7 +231,7 @@ enum AVDevToAppMessageType {
|
||||
* Display window buffer message.
|
||||
*
|
||||
* Device requests to display a window buffer.
|
||||
* Message is sent when new frame is ready to be displyed.
|
||||
* Message is sent when new frame is ready to be displayed.
|
||||
* Usually buffers need to be swapped in handler of this message.
|
||||
*
|
||||
* data: NULL.
|
||||
@@ -160,7 +247,49 @@ enum AVDevToAppMessageType {
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S')
|
||||
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
|
||||
|
||||
/**
|
||||
* Buffer fullness status messages.
|
||||
*
|
||||
* Device signals buffer overflow/underflow.
|
||||
*
|
||||
* data: NULL.
|
||||
*/
|
||||
AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
|
||||
AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
|
||||
|
||||
/**
|
||||
* Buffer readable/writable.
|
||||
*
|
||||
* Device informs that buffer is readable/writable.
|
||||
* When possible, device informs how many bytes can be read/write.
|
||||
*
|
||||
* @warning Device may not inform when number of bytes than can be read/write changes.
|
||||
*
|
||||
* data: int64_t: amount of bytes available to read/write.
|
||||
* NULL: amount of bytes available to read/write is not known.
|
||||
*/
|
||||
AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
|
||||
AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
|
||||
|
||||
/**
|
||||
* Mute state change message.
|
||||
*
|
||||
* Device informs that mute state has changed.
|
||||
*
|
||||
* data: int: 0 for not muted state, non-zero for muted state.
|
||||
*/
|
||||
AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
|
||||
|
||||
/**
|
||||
* Volume level change message.
|
||||
*
|
||||
* Device informs that volume level has changed.
|
||||
*
|
||||
* data: double: new volume with range of 0.0 - 1.0.
|
||||
*/
|
||||
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -191,6 +320,131 @@ int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
|
||||
enum AVDevToAppMessageType type,
|
||||
void *data, size_t data_size);
|
||||
|
||||
/**
|
||||
* Following API allows user to probe device capabilities (supported codecs,
|
||||
* pixel formats, sample formats, resolutions, channel counts, etc).
|
||||
* It is build on top op AVOption API.
|
||||
* Queried capabilities allows to set up converters of video or audio
|
||||
* parameters that fit to the device.
|
||||
*
|
||||
* List of capabilities that can be queried:
|
||||
* - Capabilities valid for both audio and video devices:
|
||||
* - codec: supported audio/video codecs.
|
||||
* type: AV_OPT_TYPE_INT (AVCodecID value)
|
||||
* - Capabilities valid for audio devices:
|
||||
* - sample_format: supported sample formats.
|
||||
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
|
||||
* - sample_rate: supported sample rates.
|
||||
* type: AV_OPT_TYPE_INT
|
||||
* - channels: supported number of channels.
|
||||
* type: AV_OPT_TYPE_INT
|
||||
* - channel_layout: supported channel layouts.
|
||||
* type: AV_OPT_TYPE_INT64
|
||||
* - Capabilities valid for video devices:
|
||||
* - pixel_format: supported pixel formats.
|
||||
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
|
||||
* - window_size: supported window sizes (describes size of the window size presented to the user).
|
||||
* type: AV_OPT_TYPE_IMAGE_SIZE
|
||||
* - frame_size: supported frame sizes (describes size of provided video frames).
|
||||
* type: AV_OPT_TYPE_IMAGE_SIZE
|
||||
* - fps: supported fps values
|
||||
* type: AV_OPT_TYPE_RATIONAL
|
||||
*
|
||||
* Value of the capability may be set by user using av_opt_set() function
|
||||
* and AVDeviceCapabilitiesQuery object. Following queries will
|
||||
* limit results to the values matching already set capabilities.
|
||||
* For example, setting a codec may impact number of formats or fps values
|
||||
* returned during next query. Setting invalid value may limit results to zero.
|
||||
*
|
||||
* Example of the usage basing on opengl output device:
|
||||
*
|
||||
* @code
|
||||
* AVFormatContext *oc = NULL;
|
||||
* AVDeviceCapabilitiesQuery *caps = NULL;
|
||||
* AVOptionRanges *ranges;
|
||||
* int ret;
|
||||
*
|
||||
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
|
||||
* goto fail;
|
||||
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
|
||||
* goto fail;
|
||||
*
|
||||
* //query codecs
|
||||
* if (av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
|
||||
* goto fail;
|
||||
* //pick codec here and set it
|
||||
* av_opt_set(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
|
||||
*
|
||||
* //query format
|
||||
* if (av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
|
||||
* goto fail;
|
||||
* //pick format here and set it
|
||||
* av_opt_set(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
|
||||
*
|
||||
* //query and set more capabilities
|
||||
*
|
||||
* fail:
|
||||
* //clean up code
|
||||
* avdevice_capabilities_free(&query, oc);
|
||||
* avformat_free_context(oc);
|
||||
* @endcode
|
||||
*/
|
||||
|
||||
/**
|
||||
* Structure describes device capabilities.
|
||||
*
|
||||
* It is used by devices in conjunction with av_device_capabilities AVOption table
|
||||
* to implement capabilities probing API based on AVOption API. Should not be used directly.
|
||||
*/
|
||||
typedef struct AVDeviceCapabilitiesQuery {
|
||||
const AVClass *av_class;
|
||||
AVFormatContext *device_context;
|
||||
enum AVCodecID codec;
|
||||
enum AVSampleFormat sample_format;
|
||||
enum AVPixelFormat pixel_format;
|
||||
int sample_rate;
|
||||
int channels;
|
||||
int64_t channel_layout;
|
||||
int window_width;
|
||||
int window_height;
|
||||
int frame_width;
|
||||
int frame_height;
|
||||
AVRational fps;
|
||||
} AVDeviceCapabilitiesQuery;
|
||||
|
||||
/**
|
||||
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
|
||||
*/
|
||||
extern const AVOption av_device_capabilities[];
|
||||
|
||||
/**
|
||||
* Initialize capabilities probing API based on AVOption API.
|
||||
*
|
||||
* avdevice_capabilities_free() must be called when query capabilities API is
|
||||
* not used anymore.
|
||||
*
|
||||
* @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
|
||||
* @param s Context of the device.
|
||||
* @param device_options An AVDictionary filled with device-private options.
|
||||
* On return this parameter will be destroyed and replaced with a dict
|
||||
* containing options that were not found. May be NULL.
|
||||
* The same options must be passed later to avformat_write_header() for output
|
||||
* devices or avformat_open_input() for input devices, or at any other place
|
||||
* that affects device-private options.
|
||||
*
|
||||
* @return >= 0 on success, negative otherwise.
|
||||
*/
|
||||
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
|
||||
AVDictionary **device_options);
|
||||
|
||||
/**
|
||||
* Free resources created by avdevice_capabilities_create()
|
||||
*
|
||||
* @param caps Device capabilities data to be freed.
|
||||
* @param s Context of the device.
|
||||
*/
|
||||
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
|
||||
|
||||
/**
|
||||
* Structure describes basic parameters of the device.
|
||||
*/
|
||||
@@ -224,7 +478,7 @@ typedef struct AVDeviceInfoList {
|
||||
int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
|
||||
|
||||
/**
|
||||
* Convinient function to free result of avdevice_list_devices().
|
||||
* Convenient function to free result of avdevice_list_devices().
|
||||
*
|
||||
* @param devices device list to be freed.
|
||||
*/
|
||||
|
||||
@@ -28,8 +28,8 @@
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVDEVICE_VERSION_MAJOR 55
|
||||
#define LIBAVDEVICE_VERSION_MINOR 10
|
||||
#define LIBAVDEVICE_VERSION_MICRO 100
|
||||
#define LIBAVDEVICE_VERSION_MINOR 13
|
||||
#define LIBAVDEVICE_VERSION_MICRO 102
|
||||
|
||||
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
|
||||
LIBAVDEVICE_VERSION_MINOR, \
|
||||
|
||||
@@ -199,7 +199,7 @@ typedef struct AVFilterBufferRef {
|
||||
* Copy properties of src to dst, without copying the actual data
|
||||
*/
|
||||
attribute_deprecated
|
||||
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);
|
||||
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src);
|
||||
|
||||
/**
|
||||
* Add a new reference to a buffer.
|
||||
@@ -668,7 +668,7 @@ struct AVFilterContext {
|
||||
* allowed threading types. I.e. a threading type needs to be set in both
|
||||
* to be allowed.
|
||||
*
|
||||
* After the filter is initialzed, libavfilter sets this field to the
|
||||
* After the filter is initialized, libavfilter sets this field to the
|
||||
* threading type that is actually used (0 for no multithreading).
|
||||
*/
|
||||
int thread_type;
|
||||
@@ -830,7 +830,7 @@ struct AVFilterLink {
|
||||
|
||||
/**
|
||||
* True if the link is closed.
|
||||
* If set, all attemps of start_frame, filter_frame or request_frame
|
||||
* If set, all attempts of start_frame, filter_frame or request_frame
|
||||
* will fail with AVERROR_EOF, and if necessary the reference will be
|
||||
* destroyed.
|
||||
* If request_frame returns AVERROR_EOF, this flag is set on the
|
||||
@@ -1260,19 +1260,21 @@ AVFilterGraph *avfilter_graph_alloc(void);
|
||||
*
|
||||
* @return the context of the newly created filter instance (note that it is
|
||||
* also retrievable directly through AVFilterGraph.filters or with
|
||||
* avfilter_graph_get_filter()) on success or NULL or failure.
|
||||
* avfilter_graph_get_filter()) on success or NULL on failure.
|
||||
*/
|
||||
AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
|
||||
const AVFilter *filter,
|
||||
const char *name);
|
||||
|
||||
/**
|
||||
* Get a filter instance with name name from graph.
|
||||
* Get a filter instance identified by instance name from graph.
|
||||
*
|
||||
* @param graph filter graph to search through.
|
||||
* @param name filter instance name (should be unique in the graph).
|
||||
* @return the pointer to the found filter instance or NULL if it
|
||||
* cannot be found.
|
||||
*/
|
||||
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, char *name);
|
||||
AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);
|
||||
|
||||
#if FF_API_AVFILTER_OPEN
|
||||
/**
|
||||
@@ -1384,7 +1386,7 @@ void avfilter_inout_free(AVFilterInOut **inout);
|
||||
* outputs of the already existing filters, which are provided as
|
||||
* inputs to the parsed filters.
|
||||
*
|
||||
* @param graph the filter graph where to link the parsed grap context
|
||||
* @param graph the filter graph where to link the parsed graph context
|
||||
* @param filters string to be parsed
|
||||
* @param inputs linked list to the inputs of the graph
|
||||
* @param outputs linked list to the outputs of the graph
|
||||
@@ -1482,7 +1484,7 @@ int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const
|
||||
* "all" sends to all filters
|
||||
* otherwise it can be a filter or filter instance name
|
||||
* which will send the command to all matching filters.
|
||||
* @param cmd the command to sent, for handling simplicity all commands must be alphanummeric only
|
||||
* @param cmd the command to sent, for handling simplicity all commands must be alphanumeric only
|
||||
* @param arg the argument for the command
|
||||
* @param ts time at which the command should be sent to the filter
|
||||
*
|
||||
|
||||
@@ -138,7 +138,7 @@ int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
|
||||
* Add a frame to the buffer source.
|
||||
*
|
||||
* By default, if the frame is reference-counted, this function will take
|
||||
* ownership of the reference(s) and reset the frame. This can be controled
|
||||
* ownership of the reference(s) and reset the frame. This can be controlled
|
||||
* using the flags.
|
||||
*
|
||||
* If this function returns an error, the input frame is not touched.
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVFILTER_VERSION_MAJOR 4
|
||||
#define LIBAVFILTER_VERSION_MINOR 2
|
||||
#define LIBAVFILTER_VERSION_MINOR 11
|
||||
#define LIBAVFILTER_VERSION_MICRO 100
|
||||
|
||||
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
|
||||
|
||||
@@ -194,7 +194,7 @@
|
||||
* the @ref AVStream.codec "stream codec context" information, such as the
|
||||
* codec @ref AVCodecContext.codec_type "type", @ref AVCodecContext.codec_id
|
||||
* "id" and other parameters (e.g. width / height, the pixel or sample format,
|
||||
* etc.) as known. The @ref AVCodecContext.time_base "codec timebase" should
|
||||
* etc.) as known. The @ref AVStream.time_base "stream timebase" should
|
||||
* be set to the timebase that the caller desires to use for this stream (note
|
||||
* that the timebase actually used by the muxer can be different, as will be
|
||||
* described later).
|
||||
@@ -218,8 +218,8 @@
|
||||
* a single muxing context, they should not be mixed). Do note that the timing
|
||||
* information on the packets sent to the muxer must be in the corresponding
|
||||
* AVStream's timebase. That timebase is set by the muxer (in the
|
||||
* avformat_write_header() step) and may be different from the timebase the
|
||||
* caller set on the codec context.
|
||||
* avformat_write_header() step) and may be different from the timebase
|
||||
* requested by the caller.
|
||||
*
|
||||
* Once all the data has been written, the caller must call av_write_trailer()
|
||||
* to flush any buffered packets and finalize the output file, then close the IO
|
||||
@@ -262,6 +262,7 @@
|
||||
struct AVFormatContext;
|
||||
|
||||
struct AVDeviceInfoList;
|
||||
struct AVDeviceCapabilitiesQuery;
|
||||
|
||||
/**
|
||||
* @defgroup metadata_api Public Metadata API
|
||||
@@ -367,6 +368,7 @@ int av_get_packet(AVIOContext *s, AVPacket *pkt, int size);
|
||||
*/
|
||||
int av_append_packet(AVIOContext *s, AVPacket *pkt, int size);
|
||||
|
||||
#if FF_API_LAVF_FRAC
|
||||
/*************************************************/
|
||||
/* fractional numbers for exact pts handling */
|
||||
|
||||
@@ -377,6 +379,7 @@ int av_append_packet(AVIOContext *s, AVPacket *pkt, int size);
|
||||
typedef struct AVFrac {
|
||||
int64_t val, num, den;
|
||||
} AVFrac;
|
||||
#endif
|
||||
|
||||
/*************************************************/
|
||||
/* input/output formats */
|
||||
@@ -531,6 +534,16 @@ typedef struct AVOutputFormat {
|
||||
* @see avdevice_list_devices() for more details.
|
||||
*/
|
||||
int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
|
||||
/**
|
||||
* Initialize device capabilities submodule.
|
||||
* @see avdevice_capabilities_create() for more details.
|
||||
*/
|
||||
int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
|
||||
/**
|
||||
* Free device capabilities submodule.
|
||||
* @see avdevice_capabilities_free() for more details.
|
||||
*/
|
||||
int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
|
||||
} AVOutputFormat;
|
||||
/**
|
||||
* @}
|
||||
@@ -665,6 +678,18 @@ typedef struct AVInputFormat {
|
||||
* @see avdevice_list_devices() for more details.
|
||||
*/
|
||||
int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
|
||||
|
||||
/**
|
||||
* Initialize device capabilities submodule.
|
||||
* @see avdevice_capabilities_create() for more details.
|
||||
*/
|
||||
int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
|
||||
|
||||
/**
|
||||
* Free device capabilities submodule.
|
||||
* @see avdevice_capabilities_free() for more details.
|
||||
*/
|
||||
int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
|
||||
} AVInputFormat;
|
||||
/**
|
||||
* @}
|
||||
@@ -762,19 +787,25 @@ typedef struct AVStream {
|
||||
AVCodecContext *codec;
|
||||
void *priv_data;
|
||||
|
||||
#if FF_API_LAVF_FRAC
|
||||
/**
|
||||
* encoding: pts generation when outputting stream
|
||||
* @deprecated this field is unused
|
||||
*/
|
||||
attribute_deprecated
|
||||
struct AVFrac pts;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented.
|
||||
*
|
||||
* decoding: set by libavformat
|
||||
* encoding: set by libavformat in avformat_write_header. The muxer may use the
|
||||
* user-provided value of @ref AVCodecContext.time_base "codec->time_base"
|
||||
* as a hint.
|
||||
* encoding: May be set by the caller before avformat_write_header() to
|
||||
* provide a hint to the muxer about the desired timebase. In
|
||||
* avformat_write_header(), the muxer will overwrite this field
|
||||
* with the timebase that will actually be used for the timestamps
|
||||
* written into the file (which may or may not be related to the
|
||||
* user-provided one, depending on the format).
|
||||
*/
|
||||
AVRational time_base;
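/*
 * A minimal muxing sketch building on the note above: the timestamps are
 * generated in a caller-chosen timebase (assumed here to be 1/90000) and then
 * rescaled to whatever timebase the muxer actually selected. "oc", "st" and
 * "pkt" are assumed to be set up already.
 * @code
 * AVRational enc_tb = (AVRational){ 1, 90000 };  // caller's own timebase
 * st->time_base = enc_tb;                        // hint for the muxer
 * avformat_write_header(oc, NULL);               // muxer may replace st->time_base
 * pkt.pts = av_rescale_q(pkt.pts, enc_tb, st->time_base);
 * pkt.dts = av_rescale_q(pkt.dts, enc_tb, st->time_base);
 * av_interleaved_write_frame(oc, &pkt);
 * @endcode
 */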
|
||||
|
||||
@@ -812,6 +843,10 @@ typedef struct AVStream {
|
||||
|
||||
/**
|
||||
* Average framerate
|
||||
*
|
||||
* - demuxing: May be set by libavformat when creating the stream or in
|
||||
* avformat_find_stream_info().
|
||||
* - muxing: May be set by the caller before avformat_write_header().
|
||||
*/
|
||||
AVRational avg_frame_rate;
|
||||
|
||||
@@ -824,6 +859,30 @@ typedef struct AVStream {
|
||||
*/
|
||||
AVPacket attached_pic;
|
||||
|
||||
/**
|
||||
* An array of side data that applies to the whole stream (i.e. the
|
||||
* container does not allow it to change between packets).
|
||||
*
|
||||
* There may be no overlap between the side data in this array and side data
|
||||
* in the packets. I.e. a given side data is either exported by the muxer
|
||||
* (demuxing) / set by the caller (muxing) in this array, then it never
|
||||
* appears in the packets, or the side data is exported / sent through
|
||||
* the packets (always in the first packet where the value becomes known or
|
||||
* changes), then it does not appear in this array.
|
||||
*
|
||||
* - demuxing: Set by libavformat when the stream is created.
|
||||
* - muxing: May be set by the caller before avformat_write_header().
|
||||
*
|
||||
* Freed by libavformat in avformat_free_context().
|
||||
*
|
||||
* @see av_format_inject_global_side_data()
|
||||
*/
|
||||
AVPacketSideData *side_data;
|
||||
/**
|
||||
* The number of elements in the AVStream.side_data array.
|
||||
*/
|
||||
int nb_side_data;
|
||||
|
||||
/*****************************************************************
|
||||
* All fields below this line are not part of the public API. They
|
||||
* may not be used outside of libavformat and can be changed and
|
||||
@@ -844,6 +903,12 @@ typedef struct AVStream {
|
||||
double (*duration_error)[2][MAX_STD_TIMEBASES];
|
||||
int64_t codec_info_duration;
|
||||
int64_t codec_info_duration_fields;
|
||||
|
||||
/**
|
||||
* 0 -> decoder has not been searched for yet.
|
||||
* >0 -> decoder found
|
||||
* <0 -> decoder with codec_id == -found_decoder has not been found
|
||||
*/
|
||||
int found_decoder;
|
||||
|
||||
int64_t last_duration;
|
||||
@@ -995,10 +1060,23 @@ typedef struct AVStream {
|
||||
uint8_t dts_ordered;
|
||||
uint8_t dts_misordered;
|
||||
|
||||
/**
|
||||
* Internal data to inject global side data
|
||||
*/
|
||||
int inject_global_side_data;
|
||||
|
||||
} AVStream;
|
||||
|
||||
AVRational av_stream_get_r_frame_rate(const AVStream *s);
|
||||
void av_stream_set_r_frame_rate(AVStream *s, AVRational r);
|
||||
struct AVCodecParserContext *av_stream_get_parser(const AVStream *s);
|
||||
|
||||
/**
|
||||
* Returns the pts of the last muxed packet + its duration
|
||||
*
|
||||
* The returned value is undefined when used with a demuxer.
|
||||
*/
|
||||
int64_t av_stream_get_end_pts(const AVStream *st);
|
||||
|
||||
#define AV_PROGRAM_RUNNING 1
|
||||
|
||||
@@ -1117,7 +1195,11 @@ typedef struct AVFormatContext {
|
||||
AVIOContext *pb;
|
||||
|
||||
/* stream info */
|
||||
int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
|
||||
/**
|
||||
* Flags signalling stream properties. A combination of AVFMTCTX_*.
|
||||
* Set by libavformat.
|
||||
*/
|
||||
int ctx_flags;
|
||||
|
||||
/**
|
||||
* Number of elements in AVFormatContext.streams.
|
||||
@@ -1175,6 +1257,10 @@ typedef struct AVFormatContext {
|
||||
unsigned int packet_size;
|
||||
int max_delay;
|
||||
|
||||
/**
|
||||
* Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*.
|
||||
* Set by the user before avformat_open_input() / avformat_write_header().
|
||||
*/
|
||||
int flags;
|
||||
#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames.
|
||||
#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index.
|
||||
@@ -1186,6 +1272,13 @@ typedef struct AVFormatContext {
|
||||
#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.
|
||||
#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted
|
||||
#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet.
|
||||
/**
|
||||
* When muxing, try to avoid writing any random/volatile data to the output.
|
||||
* This includes any random IDs, real-time timestamps/dates, muxer version, etc.
|
||||
*
|
||||
* This flag is mainly intended for testing.
|
||||
*/
|
||||
#define AVFMT_FLAG_BITEXACT 0x0400
|
||||
#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload
|
||||
#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
|
||||
#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
|
||||
@@ -1199,10 +1292,9 @@ typedef struct AVFormatContext {
|
||||
unsigned int probesize;
|
||||
|
||||
/**
|
||||
* Maximum duration (in AV_TIME_BASE units) of the data read
|
||||
* from input in avformat_find_stream_info().
|
||||
* Demuxing only, set by the caller before avformat_find_stream_info().
|
||||
* @deprecated deprecated in favor of max_analyze_duration2
|
||||
*/
|
||||
attribute_deprecated
|
||||
int max_analyze_duration;
|
||||
|
||||
const uint8_t *key;
|
||||
@@ -1275,7 +1367,12 @@ typedef struct AVFormatContext {
|
||||
* Start time of the stream in real world time, in microseconds
|
||||
* since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the
|
||||
* stream was captured at this real world time.
|
||||
* Muxing only, set by the caller before avformat_write_header().
|
||||
* - muxing: Set by the caller before avformat_write_header(). If set to
|
||||
* either 0 or AV_NOPTS_VALUE, then the current wall-time will
|
||||
* be used.
|
||||
* - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that
|
||||
* the value may become known after some number of frames
|
||||
* have been received.
|
||||
*/
|
||||
int64_t start_time_realtime;
|
||||
|
||||
@@ -1328,6 +1425,12 @@ typedef struct AVFormatContext {
|
||||
*/
|
||||
int64_t max_interleave_delta;
|
||||
|
||||
/**
|
||||
* Allow non-standard and experimental extension
|
||||
* @see AVCodecContext.strict_std_compliance
|
||||
*/
|
||||
int strict_std_compliance;
|
||||
|
||||
/**
|
||||
* Transport stream id.
|
||||
* This will be moved into demuxer private options. Thus no API/ABI compatibility
|
||||
@@ -1397,7 +1500,7 @@ typedef struct AVFormatContext {
|
||||
* - encoding: unused
|
||||
* - decoding: Set by user via AVOptions (NO direct access)
|
||||
*/
|
||||
unsigned int skip_initial_bytes;
|
||||
int64_t skip_initial_bytes;
|
||||
|
||||
/**
|
||||
* Correct single timestamp overflows
|
||||
@@ -1429,6 +1532,13 @@ typedef struct AVFormatContext {
|
||||
*/
|
||||
int probe_score;
|
||||
|
||||
/**
|
||||
* Maximum number of bytes read to identify the format.
* - encoding: unused
* - decoding: set by user through AVOptions (NO direct access)
|
||||
*/
|
||||
int format_probesize;
|
||||
|
||||
/*****************************************************************
|
||||
* All fields below this line are not part of the public API. They
|
||||
* may not be used outside of libavformat and can be changed and
|
||||
@@ -1541,6 +1651,15 @@ typedef struct AVFormatContext {
|
||||
* Muxing: set by user via AVOptions (NO direct access)
|
||||
*/
|
||||
int64_t output_ts_offset;
|
||||
|
||||
/**
|
||||
* Maximum duration (in AV_TIME_BASE units) of the data read
|
||||
* from input in avformat_find_stream_info().
|
||||
* Demuxing only, set by the caller before avformat_find_stream_info()
|
||||
* via AVOptions (NO direct access).
|
||||
* Can be set to 0 to let avformat choose using a heuristic.
|
||||
*/
|
||||
int64_t max_analyze_duration2;
|
||||
} AVFormatContext;
|
||||
|
||||
int av_format_get_probe_score(const AVFormatContext *s);
|
||||
@@ -1557,6 +1676,12 @@ void av_format_set_opaque(AVFormatContext *s, void *opaque);
|
||||
av_format_control_message av_format_get_control_message_cb(const AVFormatContext *s);
|
||||
void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback);
|
||||
|
||||
/**
|
||||
* This function will cause global side data to be injected in the next packet
|
||||
* of each stream as well as after any subsequent seek.
|
||||
*/
|
||||
void av_format_inject_global_side_data(AVFormatContext *s);
|
||||
|
||||
/**
|
||||
* Returns the method used to set ctx->duration.
|
||||
*
|
||||
@@ -1678,6 +1803,17 @@ const AVClass *avformat_get_class(void);
|
||||
*/
|
||||
AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
|
||||
|
||||
/**
|
||||
* Get side information from stream.
|
||||
*
|
||||
* @param stream stream
|
||||
* @param type desired side information type
|
||||
* @param size pointer for side information size to store (optional)
|
||||
* @return pointer to data if present or NULL otherwise
|
||||
*/
|
||||
uint8_t *av_stream_get_side_data(AVStream *stream,
|
||||
enum AVPacketSideDataType type, int *size);
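/*
 * A minimal sketch of a common use of this function, assuming the
 * AV_PKT_DATA_DISPLAYMATRIX side-data type and libavutil/display.h are
 * available in this build: fetch the stream's display matrix and turn it into
 * a rotation angle.
 * @code
 * int size;
 * uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &size);
 * if (sd && size >= 9 * sizeof(int32_t)) {
 *     double angle = av_display_rotation_get((const int32_t *)sd);
 *     printf("stream is rotated by %f degrees\n", angle);
 * }
 * @endcode
 */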
|
||||
|
||||
AVProgram *av_new_program(AVFormatContext *s, int id);
|
||||
|
||||
/**
|
||||
@@ -2271,7 +2407,7 @@ void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);
|
||||
* @param dump_payload True if the payload must be displayed, too.
|
||||
* @param st AVStream that the packet belongs to
|
||||
*/
|
||||
void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);
|
||||
void av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st);
|
||||
|
||||
|
||||
/**
|
||||
@@ -2285,8 +2421,8 @@ void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st);
|
||||
* @param dump_payload True if the payload must be displayed, too.
|
||||
* @param st AVStream that the packet belongs to
|
||||
*/
|
||||
void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
|
||||
AVStream *st);
|
||||
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload,
|
||||
const AVStream *st);
|
||||
|
||||
/**
|
||||
* Get the AVCodecID for the given codec tag tag.
|
||||
@@ -2372,6 +2508,16 @@ void av_url_split(char *proto, int proto_size,
|
||||
const char *url);
|
||||
|
||||
|
||||
/**
|
||||
* Print detailed information about the input or output format, such as
|
||||
* duration, bitrate, streams, container, programs, metadata, side data,
|
||||
* codec and time base.
|
||||
*
|
||||
* @param ic the context to analyze
|
||||
* @param index index of the stream to dump information about
|
||||
* @param url the URL to print, such as source or destination file
|
||||
* @param is_output Select whether the specified context is an input(0) or output(1)
|
||||
*/
|
||||
void av_dump_format(AVFormatContext *ic,
|
||||
int index,
|
||||
const char *url,
|
||||
|
||||
@@ -146,6 +146,13 @@ typedef struct AVIOContext {
|
||||
* This field is internal to libavformat and access from outside is not allowed.
|
||||
*/
|
||||
int writeout_count;
|
||||
|
||||
/**
|
||||
* Original buffer size
|
||||
* used internally after probing, to ensure seekback and to reset the buffer size.
|
||||
* This field is internal to libavformat and access from outside is not allowed.
|
||||
*/
|
||||
int orig_buffer_size;
|
||||
} AVIOContext;
|
||||
|
||||
/* unbuffered I/O */
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVFORMAT_VERSION_MAJOR 55
|
||||
#define LIBAVFORMAT_VERSION_MINOR 33
|
||||
#define LIBAVFORMAT_VERSION_MINOR 48
|
||||
#define LIBAVFORMAT_VERSION_MICRO 100
|
||||
|
||||
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
|
||||
@@ -51,6 +51,15 @@
|
||||
#ifndef FF_API_REFERENCE_DTS
|
||||
#define FF_API_REFERENCE_DTS (LIBAVFORMAT_VERSION_MAJOR < 56)
|
||||
#endif
|
||||
#ifndef FF_API_LAVF_BITEXACT
|
||||
#define FF_API_LAVF_BITEXACT (LIBAVFORMAT_VERSION_MAJOR < 56)
|
||||
#endif
|
||||
#ifndef FF_API_LAVF_FRAC
|
||||
#define FF_API_LAVF_FRAC (LIBAVFORMAT_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
#ifndef FF_API_LAVF_CODEC_TB
|
||||
#define FF_API_LAVF_CODEC_TB (LIBAVFORMAT_VERSION_MAJOR < 57)
|
||||
#endif
|
||||
|
||||
#ifndef FF_API_ALLOC_OUTPUT_CONTEXT
|
||||
#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56)
|
||||
|
||||
libs/FFmpeg/include/libavresample/avresample.h (new file, 429 lines)
@@ -0,0 +1,429 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVRESAMPLE_AVRESAMPLE_H
|
||||
#define AVRESAMPLE_AVRESAMPLE_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavr
|
||||
* external API header
|
||||
*/
|
||||
|
||||
/**
|
||||
* @defgroup lavr Libavresample
|
||||
* @{
|
||||
*
|
||||
* Libavresample (lavr) is a library that handles audio resampling, sample
|
||||
* format conversion and mixing.
|
||||
*
|
||||
* Interaction with lavr is done through AVAudioResampleContext, which is
|
||||
* allocated with avresample_alloc_context(). It is opaque, so all parameters
|
||||
* must be set with the @ref avoptions API.
|
||||
*
|
||||
* For example the following code will setup conversion from planar float sample
|
||||
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
|
||||
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
|
||||
* matrix):
|
||||
* @code
|
||||
* AVAudioResampleContext *avr = avresample_alloc_context();
|
||||
* av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
|
||||
* av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
|
||||
* av_opt_set_int(avr, "in_sample_rate", 48000, 0);
|
||||
* av_opt_set_int(avr, "out_sample_rate", 44100, 0);
|
||||
* av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
|
||||
* av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
* @endcode
|
||||
*
|
||||
* Once the context is initialized, it must be opened with avresample_open(). If
|
||||
* you need to change the conversion parameters, you must close the context with
|
||||
* avresample_close(), change the parameters as described above, then reopen it
|
||||
* again.
|
||||
*
|
||||
* The conversion itself is done by repeatedly calling avresample_convert().
|
||||
* Note that the samples may get buffered in two places in lavr. The first one
|
||||
* is the output FIFO, where the samples end up if the output buffer is not
|
||||
* large enough. The data stored in there may be retrieved at any time with
|
||||
* avresample_read(). The second place is the resampling delay buffer,
|
||||
* applicable only when resampling is done. The samples in it require more input
|
||||
* before they can be processed. Their current amount is returned by
|
||||
* avresample_get_delay(). At the end of conversion the resampling buffer can be
|
||||
* flushed by calling avresample_convert() with NULL input.
|
||||
*
|
||||
* The following code demonstrates the conversion loop assuming the parameters
|
||||
* from above and caller-defined functions get_input() and handle_output():
|
||||
* @code
|
||||
* uint8_t **input;
|
||||
* int in_linesize, in_samples;
|
||||
*
|
||||
* while (get_input(&input, &in_linesize, &in_samples)) {
|
||||
* uint8_t *output;
|
||||
* int out_linesize;
|
||||
* int out_samples = avresample_get_out_samples(avr, in_samples);
|
||||
*
|
||||
* av_samples_alloc(&output, &out_linesize, 2, out_samples,
|
||||
* AV_SAMPLE_FMT_S16, 0);
|
||||
* out_samples = avresample_convert(avr, &output, out_linesize, out_samples,
|
||||
* input, in_linesize, in_samples);
|
||||
* handle_output(output, out_linesize, out_samples);
|
||||
* av_freep(&output);
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* When the conversion is finished and the FIFOs are flushed if required, the
|
||||
* conversion context and everything associated with it must be freed with
|
||||
* avresample_free().
|
||||
*/
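/*
 * A minimal draining sketch that follows the loop above once the input is
 * exhausted; handle_output() is the same caller-defined helper, and a single
 * NULL-input conversion is assumed to be enough because the output buffer is
 * sized from avresample_get_out_samples().
 * @code
 * uint8_t *output;
 * int out_linesize;
 * int out_samples = avresample_get_out_samples(avr, 0);
 * if (out_samples > 0) {
 *     av_samples_alloc(&output, &out_linesize, 2, out_samples,
 *                      AV_SAMPLE_FMT_S16, 0);
 *     out_samples = avresample_convert(avr, &output, out_linesize, out_samples,
 *                                      NULL, 0, 0);
 *     handle_output(output, out_linesize, out_samples);
 *     av_freep(&output);
 * }
 * avresample_free(&avr);
 * @endcode
 */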
|
||||
|
||||
#include "libavutil/avutil.h"
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
|
||||
#include "libavresample/version.h"
|
||||
|
||||
#define AVRESAMPLE_MAX_CHANNELS 32
|
||||
|
||||
typedef struct AVAudioResampleContext AVAudioResampleContext;
|
||||
|
||||
/** Mixing Coefficient Types */
|
||||
enum AVMixCoeffType {
|
||||
AV_MIX_COEFF_TYPE_Q8, /** 16-bit 8.8 fixed-point */
|
||||
AV_MIX_COEFF_TYPE_Q15, /** 32-bit 17.15 fixed-point */
|
||||
AV_MIX_COEFF_TYPE_FLT, /** floating-point */
|
||||
AV_MIX_COEFF_TYPE_NB, /** Number of coeff types. Not part of ABI */
|
||||
};
|
||||
|
||||
/** Resampling Filter Types */
|
||||
enum AVResampleFilterType {
|
||||
AV_RESAMPLE_FILTER_TYPE_CUBIC, /**< Cubic */
|
||||
AV_RESAMPLE_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */
|
||||
AV_RESAMPLE_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
|
||||
};
|
||||
|
||||
enum AVResampleDitherMethod {
|
||||
AV_RESAMPLE_DITHER_NONE, /**< Do not use dithering */
|
||||
AV_RESAMPLE_DITHER_RECTANGULAR, /**< Rectangular Dither */
|
||||
AV_RESAMPLE_DITHER_TRIANGULAR, /**< Triangular Dither*/
|
||||
AV_RESAMPLE_DITHER_TRIANGULAR_HP, /**< Triangular Dither with High Pass */
|
||||
AV_RESAMPLE_DITHER_TRIANGULAR_NS, /**< Triangular Dither with Noise Shaping */
|
||||
AV_RESAMPLE_DITHER_NB, /**< Number of dither types. Not part of ABI. */
|
||||
};
|
||||
|
||||
/**
|
||||
* Return the LIBAVRESAMPLE_VERSION_INT constant.
|
||||
*/
|
||||
unsigned avresample_version(void);
|
||||
|
||||
/**
|
||||
* Return the libavresample build-time configuration.
|
||||
* @return configure string
|
||||
*/
|
||||
const char *avresample_configuration(void);
|
||||
|
||||
/**
|
||||
* Return the libavresample license.
|
||||
*/
|
||||
const char *avresample_license(void);
|
||||
|
||||
/**
|
||||
* Get the AVClass for AVAudioResampleContext.
|
||||
*
|
||||
* Can be used in combination with AV_OPT_SEARCH_FAKE_OBJ for examining options
|
||||
* without allocating a context.
|
||||
*
|
||||
* @see av_opt_find().
|
||||
*
|
||||
* @return AVClass for AVAudioResampleContext
|
||||
*/
|
||||
const AVClass *avresample_get_class(void);
|
||||
|
||||
/**
|
||||
* Allocate AVAudioResampleContext and set options.
|
||||
*
|
||||
* @return allocated audio resample context, or NULL on failure
|
||||
*/
|
||||
AVAudioResampleContext *avresample_alloc_context(void);
|
||||
|
||||
/**
|
||||
* Initialize AVAudioResampleContext.
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_open(AVAudioResampleContext *avr);
|
||||
|
||||
/**
|
||||
* Check whether an AVAudioResampleContext is open or closed.
|
||||
*
|
||||
* @param avr AVAudioResampleContext to check
|
||||
* @return 1 if avr is open, 0 if avr is closed.
|
||||
*/
|
||||
int avresample_is_open(AVAudioResampleContext *avr);
|
||||
|
||||
/**
|
||||
* Close AVAudioResampleContext.
|
||||
*
|
||||
* This closes the context, but it does not change the parameters. The context
|
||||
* can be reopened with avresample_open(). It does, however, clear the output
|
||||
* FIFO and any remaining leftover samples in the resampling delay buffer. If
|
||||
* there was a custom matrix being used, that is also cleared.
|
||||
*
|
||||
* @see avresample_convert()
|
||||
* @see avresample_set_matrix()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
*/
|
||||
void avresample_close(AVAudioResampleContext *avr);
|
||||
|
||||
/**
|
||||
* Free AVAudioResampleContext and associated AVOption values.
|
||||
*
|
||||
* This also calls avresample_close() before freeing.
|
||||
*
|
||||
* @param avr audio resample context
|
||||
*/
|
||||
void avresample_free(AVAudioResampleContext **avr);
|
||||
|
||||
/**
|
||||
* Generate a channel mixing matrix.
|
||||
*
|
||||
* This function is the one used internally by libavresample for building the
|
||||
* default mixing matrix. It is made public just as a utility function for
|
||||
* building custom matrices.
|
||||
*
|
||||
* @param in_layout input channel layout
|
||||
* @param out_layout output channel layout
|
||||
* @param center_mix_level mix level for the center channel
|
||||
* @param surround_mix_level mix level for the surround channel(s)
|
||||
* @param lfe_mix_level mix level for the low-frequency effects channel
|
||||
* @param normalize if 1, coefficients will be normalized to prevent
|
||||
* overflow. if 0, coefficients will not be
|
||||
* normalized.
|
||||
* @param[out] matrix mixing coefficients; matrix[i + stride * o] is
|
||||
* the weight of input channel i in output channel o.
|
||||
* @param stride distance between adjacent input channels in the
|
||||
* matrix array
|
||||
* @param matrix_encoding matrixed stereo downmix mode (e.g. dplii)
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_build_matrix(uint64_t in_layout, uint64_t out_layout,
|
||||
double center_mix_level, double surround_mix_level,
|
||||
double lfe_mix_level, int normalize, double *matrix,
|
||||
int stride, enum AVMatrixEncoding matrix_encoding);
|
||||
|
||||
/**
|
||||
* Get the current channel mixing matrix.
|
||||
*
|
||||
* If no custom matrix has been previously set or the AVAudioResampleContext is
|
||||
* not open, an error is returned.
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param matrix mixing coefficients; matrix[i + stride * o] is the weight of
|
||||
* input channel i in output channel o.
|
||||
* @param stride distance between adjacent input channels in the matrix array
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_get_matrix(AVAudioResampleContext *avr, double *matrix,
|
||||
int stride);
|
||||
|
||||
/**
|
||||
* Set channel mixing matrix.
|
||||
*
|
||||
* Allows for setting a custom mixing matrix, overriding the default matrix
|
||||
* generated internally during avresample_open(). This function can be called
|
||||
* anytime on an allocated context, either before or after calling
|
||||
* avresample_open(), as long as the channel layouts have been set.
|
||||
* avresample_convert() always uses the current matrix.
|
||||
* Calling avresample_close() on the context will clear the current matrix.
|
||||
*
|
||||
* @see avresample_close()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param matrix mixing coefficients; matrix[i + stride * o] is the weight of
|
||||
* input channel i in output channel o.
|
||||
* @param stride distance between adjacent input channels in the matrix array
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_set_matrix(AVAudioResampleContext *avr, const double *matrix,
|
||||
int stride);
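/*
 * A minimal sketch that builds the stock 5.1-to-stereo coefficients, adjusts
 * one of them and installs the result; the mix levels below are illustrative
 * values, not defaults taken from this header.
 * @code
 * double matrix[6 * 2];                       // 6 input channels, 2 output channels
 * avresample_build_matrix(AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_STEREO,
 *                         0.707, 0.707, 0.0, 1, matrix, 6,
 *                         AV_MATRIX_ENCODING_NONE);
 * matrix[0 + 6 * 0] *= 0.5;                   // halve front-left's weight in the left output
 * avresample_set_matrix(avr, matrix, 6);
 * @endcode
 */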
|
||||
|
||||
/**
|
||||
* Set a customized input channel mapping.
|
||||
*
|
||||
* This function can only be called when the allocated context is not open.
|
||||
* Also, the input channel layout must have already been set.
|
||||
*
|
||||
* Calling avresample_close() on the context will clear the channel mapping.
|
||||
*
|
||||
* The map for each input channel specifies the channel index in the source to
|
||||
* use for that particular channel, or -1 to mute the channel. Source channels
|
||||
* can be duplicated by using the same index for multiple input channels.
|
||||
*
|
||||
* Examples:
|
||||
*
|
||||
* Reordering 5.1 AAC order (C,L,R,Ls,Rs,LFE) to FFmpeg order (L,R,C,LFE,Ls,Rs):
|
||||
* { 1, 2, 0, 5, 3, 4 }
|
||||
*
|
||||
* Muting the 3rd channel in 4-channel input:
|
||||
* { 0, 1, -1, 3 }
|
||||
*
|
||||
* Duplicating the left channel of stereo input:
|
||||
* { 0, 0 }
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param channel_map customized input channel mapping
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_set_channel_mapping(AVAudioResampleContext *avr,
|
||||
const int *channel_map);
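/*
 * A minimal sketch applying the AAC-order example above before opening the
 * context ("avr" is an allocated but not yet opened context).
 * @code
 * // 5.1 AAC order (C,L,R,Ls,Rs,LFE) -> FFmpeg order (L,R,C,LFE,Ls,Rs)
 * static const int aac_map[6] = { 1, 2, 0, 5, 3, 4 };
 * av_opt_set_int(avr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
 * avresample_set_channel_mapping(avr, aac_map);
 * avresample_open(avr);
 * @endcode
 */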
|
||||
|
||||
/**
|
||||
* Set compensation for resampling.
|
||||
*
|
||||
* This can be called anytime after avresample_open(). If resampling is not
|
||||
* automatically enabled because of a sample rate conversion, the
|
||||
* "force_resampling" option must have been set to 1 when opening the context
|
||||
* in order to use resampling compensation.
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param sample_delta compensation delta, in samples
|
||||
* @param compensation_distance compensation distance, in samples
|
||||
* @return 0 on success, negative AVERROR code on failure
|
||||
*/
|
||||
int avresample_set_compensation(AVAudioResampleContext *avr, int sample_delta,
|
||||
int compensation_distance);
|
||||
|
||||
/**
|
||||
* Provide the upper bound on the number of samples the configured
|
||||
* conversion would output.
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param in_nb_samples number of input samples
|
||||
*
|
||||
* @return number of samples or AVERROR(EINVAL) if the value
|
||||
* would exceed INT_MAX
|
||||
*/
|
||||
|
||||
int avresample_get_out_samples(AVAudioResampleContext *avr, int in_nb_samples);
|
||||
|
||||
/**
|
||||
* Convert input samples and write them to the output FIFO.
|
||||
*
|
||||
* The upper bound on the number of output samples can be obtained through
|
||||
* avresample_get_out_samples().
|
||||
*
|
||||
* The output data can be NULL or have fewer allocated samples than required.
|
||||
* In this case, any remaining samples not written to the output will be added
|
||||
* to an internal FIFO buffer, to be returned at the next call to this function
|
||||
* or to avresample_read().
|
||||
*
|
||||
* If converting sample rate, there may be data remaining in the internal
|
||||
* resampling delay buffer. avresample_get_delay() tells the number of remaining
|
||||
* samples. To get this data as output, call avresample_convert() with NULL
|
||||
* input.
|
||||
*
|
||||
* At the end of the conversion process, there may be data remaining in the
|
||||
* internal FIFO buffer. avresample_available() tells the number of remaining
|
||||
* samples. To get this data as output, either call avresample_convert() with
|
||||
* NULL input or call avresample_read().
|
||||
*
|
||||
* @see avresample_get_out_samples()
|
||||
* @see avresample_read()
|
||||
* @see avresample_get_delay()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param output output data pointers
|
||||
* @param out_plane_size output plane size, in bytes.
|
||||
* This can be 0 if unknown, but that will lead to
|
||||
* optimized functions not being used directly on the
|
||||
* output, which could slow down some conversions.
|
||||
* @param out_samples maximum number of samples that the output buffer can hold
|
||||
* @param input input data pointers
|
||||
* @param in_plane_size input plane size, in bytes
|
||||
* This can be 0 if unknown, but that will lead to
|
||||
* optimized functions not being used directly on the
|
||||
* input, which could slow down some conversions.
|
||||
* @param in_samples number of input samples to convert
|
||||
* @return number of samples written to the output buffer,
|
||||
* not including converted samples added to the internal
|
||||
* output FIFO
|
||||
*/
|
||||
int avresample_convert(AVAudioResampleContext *avr, uint8_t **output,
|
||||
int out_plane_size, int out_samples, uint8_t **input,
|
||||
int in_plane_size, int in_samples);
|
||||
|
||||
/**
|
||||
* Return the number of samples currently in the resampling delay buffer.
|
||||
*
|
||||
* When resampling, there may be a delay between the input and output. Any
|
||||
* unconverted samples in each call are stored internally in a delay buffer.
|
||||
* This function allows the user to determine the current number of samples in
|
||||
* the delay buffer, which can be useful for synchronization.
|
||||
*
|
||||
* @see avresample_convert()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @return number of samples currently in the resampling delay buffer
|
||||
*/
|
||||
int avresample_get_delay(AVAudioResampleContext *avr);
|
||||
|
||||
/**
|
||||
* Return the number of available samples in the output FIFO.
|
||||
*
|
||||
* During conversion, if the user does not specify an output buffer or
|
||||
* specifies an output buffer that is smaller than what is needed, remaining
|
||||
* samples that are not written to the output are stored to an internal FIFO
|
||||
* buffer. The samples in the FIFO can be read with avresample_read() or
|
||||
* avresample_convert().
|
||||
*
|
||||
* @see avresample_read()
|
||||
* @see avresample_convert()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @return number of samples available for reading
|
||||
*/
|
||||
int avresample_available(AVAudioResampleContext *avr);
|
||||
|
||||
/**
|
||||
* Read samples from the output FIFO.
|
||||
*
|
||||
* During conversion, if the user does not specify an output buffer or
|
||||
* specifies an output buffer that is smaller than what is needed, remaining
|
||||
* samples that are not written to the output are stored to an internal FIFO
|
||||
* buffer. This function can be used to read samples from that internal FIFO.
|
||||
*
|
||||
* @see avresample_available()
|
||||
* @see avresample_convert()
|
||||
*
|
||||
* @param avr audio resample context
|
||||
* @param output output data pointers. May be NULL, in which case
|
||||
* nb_samples of data is discarded from output FIFO.
|
||||
* @param nb_samples number of samples to read from the FIFO
|
||||
* @return the number of samples written to output
|
||||
*/
|
||||
int avresample_read(AVAudioResampleContext *avr, uint8_t **output, int nb_samples);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
#endif /* AVRESAMPLE_AVRESAMPLE_H */
|
||||
libs/FFmpeg/include/libavresample/version.h (new file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVRESAMPLE_VERSION_H
|
||||
#define AVRESAMPLE_VERSION_H
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @ingroup lavr
|
||||
* Libavresample version macros.
|
||||
*/
|
||||
|
||||
#include "libavutil/version.h"
|
||||
|
||||
#define LIBAVRESAMPLE_VERSION_MAJOR 1
|
||||
#define LIBAVRESAMPLE_VERSION_MINOR 3
|
||||
#define LIBAVRESAMPLE_VERSION_MICRO 0
|
||||
|
||||
#define LIBAVRESAMPLE_VERSION_INT AV_VERSION_INT(LIBAVRESAMPLE_VERSION_MAJOR, \
|
||||
LIBAVRESAMPLE_VERSION_MINOR, \
|
||||
LIBAVRESAMPLE_VERSION_MICRO)
|
||||
#define LIBAVRESAMPLE_VERSION AV_VERSION(LIBAVRESAMPLE_VERSION_MAJOR, \
|
||||
LIBAVRESAMPLE_VERSION_MINOR, \
|
||||
LIBAVRESAMPLE_VERSION_MICRO)
|
||||
#define LIBAVRESAMPLE_BUILD LIBAVRESAMPLE_VERSION_INT
|
||||
|
||||
#define LIBAVRESAMPLE_IDENT "Lavr" AV_STRINGIFY(LIBAVRESAMPLE_VERSION)
|
||||
|
||||
/**
|
||||
* FF_API_* defines may be placed below to indicate public API that will be
|
||||
* dropped at a future version bump. The defines themselves are not part of
|
||||
* the public API and may change, break or disappear at any time.
|
||||
*/
|
||||
|
||||
#ifndef FF_API_RESAMPLE_CLOSE_OPEN
|
||||
#define FF_API_RESAMPLE_CLOSE_OPEN (LIBAVRESAMPLE_VERSION_MAJOR < 2)
|
||||
#endif
|
||||
|
||||
#endif /* AVRESAMPLE_VERSION_H */
|
||||
@@ -34,6 +34,9 @@
|
||||
/**
|
||||
* @addtogroup lavu_audio
|
||||
* @{
|
||||
*
|
||||
* @defgroup lavu_audiofifo Audio FIFO Buffer
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
@@ -143,6 +146,7 @@ int av_audio_fifo_size(AVAudioFifo *af);
|
||||
int av_audio_fifo_space(AVAudioFifo *af);
|
||||
|
||||
/**
|
||||
* @}
|
||||
* @}
|
||||
*/
|
||||
|
||||
|
||||
@@ -4,5 +4,4 @@
|
||||
#define AV_HAVE_BIGENDIAN 0
|
||||
#define AV_HAVE_FAST_UNALIGNED 1
|
||||
#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0
|
||||
#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0
|
||||
#endif /* AVUTIL_AVCONFIG_H */
|
||||
|
||||
@@ -151,6 +151,12 @@
|
||||
* @{
|
||||
*
|
||||
* @}
|
||||
*
|
||||
* @defgroup version_utils Library Version Macros
|
||||
*
|
||||
* @{
|
||||
*
|
||||
* @}
|
||||
*/
|
||||
|
||||
|
||||
@@ -282,10 +288,10 @@ char av_get_picture_type_char(enum AVPictureType pict_type);
|
||||
|
||||
#include "common.h"
|
||||
#include "error.h"
|
||||
#include "rational.h"
|
||||
#include "version.h"
|
||||
#include "macros.h"
|
||||
#include "mathematics.h"
|
||||
#include "rational.h"
|
||||
#include "log.h"
|
||||
#include "pixfmt.h"
|
||||
|
||||
@@ -325,6 +331,11 @@ unsigned av_int_list_length_for_size(unsigned elsize,
|
||||
*/
|
||||
FILE *av_fopen_utf8(const char *path, const char *mode);
|
||||
|
||||
/**
|
||||
* Return the fractional representation of the internal time base.
|
||||
*/
|
||||
AVRational av_get_time_base_q(void);
|
||||
|
||||
/**
|
||||
* @}
|
||||
* @}
|
||||
|
||||
@@ -79,7 +79,7 @@
|
||||
|
||||
/**
|
||||
* @}
|
||||
* @defgroup channel_mask_c Audio channel convenience macros
|
||||
* @defgroup channel_mask_c Audio channel layouts
|
||||
* @{
|
||||
* */
|
||||
#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
|
||||
@@ -121,10 +121,6 @@ enum AVMatrixEncoding {
|
||||
AV_MATRIX_ENCODING_NB
|
||||
};
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
/**
|
||||
* Return a channel layout id that matches name, or 0 if no match is found.
|
||||
*
|
||||
@@ -219,6 +215,7 @@ int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
|
||||
const char **name);
|
||||
|
||||
/**
|
||||
* @}
|
||||
* @}
|
||||
*/
|
||||
|
||||
|
||||
@@ -63,6 +63,7 @@
|
||||
#define AV_CPU_FLAG_VFP (1 << 3)
|
||||
#define AV_CPU_FLAG_VFPV3 (1 << 4)
|
||||
#define AV_CPU_FLAG_NEON (1 << 5)
|
||||
#define AV_CPU_FLAG_ARMV8 (1 << 6)
|
||||
|
||||
/**
|
||||
* Return the flags which specify extensions supported by the CPU.
|
||||
|
||||
@@ -39,6 +39,7 @@ typedef enum {
|
||||
AV_CRC_16_CCITT,
|
||||
AV_CRC_32_IEEE,
|
||||
AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */
|
||||
AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */
|
||||
AV_CRC_24_IEEE = 12,
|
||||
AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */
|
||||
}AVCRCId;
|
||||
|
||||
@@ -31,6 +31,8 @@
|
||||
#ifndef AVUTIL_DICT_H
|
||||
#define AVUTIL_DICT_H
|
||||
|
||||
#include "version.h"
|
||||
|
||||
/**
|
||||
* @addtogroup lavu_dict AVDictionary
|
||||
* @ingroup lavu_data
|
||||
@@ -97,8 +99,8 @@ typedef struct AVDictionary AVDictionary;
|
||||
* @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
|
||||
* @return found entry or NULL in case no matching entry was found in the dictionary
|
||||
*/
|
||||
AVDictionaryEntry *
|
||||
av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);
|
||||
AVDictionaryEntry *av_dict_get(FF_CONST_AVUTIL53 AVDictionary *m, const char *key,
|
||||
const AVDictionaryEntry *prev, int flags);
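/*
 * A minimal sketch of the usual iteration idiom: an empty key together with
 * AV_DICT_IGNORE_SUFFIX matches every entry, and passing the previous entry
 * back in walks the whole dictionary.
 * @code
 * AVDictionaryEntry *e = NULL;
 * while ((e = av_dict_get(m, "", e, AV_DICT_IGNORE_SUFFIX)))
 *     printf("%s=%s\n", e->key, e->value);
 * @endcode
 */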
|
||||
|
||||
/**
|
||||
* Get number of entries in dictionary.
|
||||
@@ -148,7 +150,7 @@ int av_dict_parse_string(AVDictionary **pm, const char *str,
|
||||
* @param flags flags to use when setting entries in *dst
|
||||
* @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
|
||||
*/
|
||||
void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags);
|
||||
void av_dict_copy(AVDictionary **dst, FF_CONST_AVUTIL53 AVDictionary *src, int flags);
|
||||
|
||||
/**
|
||||
* Free all the memory allocated for an AVDictionary struct
|
||||
|
||||
libs/FFmpeg/include/libavutil/display.h (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVUTIL_DISPLAY_H
|
||||
#define AVUTIL_DISPLAY_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/**
|
||||
* The display transformation matrix specifies an affine transformation that
|
||||
* should be applied to video frames for correct presentation. It is compatible
|
||||
* with the matrices stored in the ISO/IEC 14496-12 container format.
|
||||
*
|
||||
* The data is a 3x3 matrix represented as a 9-element array:
|
||||
*
|
||||
* | a b u |
|
||||
* (a, b, u, c, d, v, x, y, w) -> | c d v |
|
||||
* | x y w |
|
||||
*
|
||||
* All numbers are stored in native endianness, as 16.16 fixed-point values,
|
||||
* except for u, v and w, which are stored as 2.30 fixed-point values.
|
||||
*
|
||||
* The transformation maps a point (p, q) in the source (pre-transformation)
|
||||
* frame to the point (p', q') in the destination (post-transformation) frame as
|
||||
* follows:
|
||||
* | a b u |
|
||||
* (p, q, 1) . | c d v | = z * (p', q', 1)
|
||||
* | x y w |
|
||||
*
|
||||
* The transformation can also be more explicitly written in components as
|
||||
* follows:
|
||||
* p' = (a * p + c * q + x) / z;
|
||||
* q' = (b * p + d * q + y) / z;
|
||||
* z = u * p + v * q + w
|
||||
*/
|
||||
|
||||
/**
|
||||
* Extract the rotation component of the transformation matrix.
|
||||
*
|
||||
* @param matrix the transformation matrix
|
||||
* @return the angle (in degrees) by which the transformation rotates the frame.
|
||||
* The angle will be in range [-180.0, 180.0], or NaN if the matrix is
|
||||
* singular.
|
||||
*
|
||||
* @note floating point numbers are inherently inexact, so callers are
|
||||
* recommended to round the return value to nearest integer before use.
|
||||
*/
|
||||
double av_display_rotation_get(const int32_t matrix[9]);
|
||||
|
||||
/**
|
||||
* Initialize a transformation matrix describing a pure rotation by the
|
||||
* specified angle (in degrees).
|
||||
*
|
||||
* @param matrix an allocated transformation matrix (will be fully overwritten
|
||||
* by this function)
|
||||
* @param angle rotation angle in degrees.
|
||||
*/
|
||||
void av_display_rotation_set(int32_t matrix[9], double angle);
|
||||
|
||||
/**
|
||||
* Flip the input matrix horizontally and/or vertically.
|
||||
*
|
||||
* @param matrix an allocated transformation matrix
|
||||
* @param hflip whether the matrix should be flipped horizontally
|
||||
* @param vflip whether the matrix should be flipped vertically
|
||||
*/
|
||||
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip);
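/*
 * A minimal round-trip sketch using only the three functions declared above.
 * @code
 * int32_t matrix[9];
 * av_display_rotation_set(matrix, 90.0);   // pure 90-degree rotation
 * av_display_matrix_flip(matrix, 1, 0);    // additionally flip horizontally
 * double angle = av_display_rotation_get(matrix);
 * @endcode
 */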
|
||||
|
||||
#endif /* AVUTIL_DISPLAY_H */
|
||||
@@ -95,11 +95,12 @@ typedef struct AVDownmixInfo {
|
||||
/**
|
||||
* Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.
|
||||
*
|
||||
* The side data is created and added to the frame if it's absent.
|
||||
* If the side data is absent, it is created and added to the frame.
|
||||
*
|
||||
* @param frame the frame for which the side data is to be obtained.
|
||||
* @param frame the frame for which the side data is to be obtained or created
|
||||
*
|
||||
* @return the AVDownmixInfo structure to be edited by the caller.
|
||||
* @return the AVDownmixInfo structure to be edited by the caller, or NULL if
|
||||
* the structure cannot be allocated.
|
||||
*/
|
||||
AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#ifndef AVUTIL_FFVERSION_H
|
||||
#define AVUTIL_FFVERSION_H
|
||||
#define FFMPEG_VERSION "2.2"
|
||||
#define FFMPEG_VERSION "2.3.3"
|
||||
#endif /* AVUTIL_FFVERSION_H */
|
||||
|
||||
@@ -41,12 +41,26 @@ typedef struct AVFifoBuffer {
|
||||
*/
|
||||
AVFifoBuffer *av_fifo_alloc(unsigned int size);
|
||||
|
||||
/**
|
||||
* Initialize an AVFifoBuffer.
|
||||
* @param nmemb number of elements
|
||||
* @param size size of the single element
|
||||
* @return AVFifoBuffer or NULL in case of memory allocation failure
|
||||
*/
|
||||
AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
|
||||
|
||||
/**
|
||||
* Free an AVFifoBuffer.
|
||||
* @param f AVFifoBuffer to free
|
||||
*/
|
||||
void av_fifo_free(AVFifoBuffer *f);
|
||||
|
||||
/**
|
||||
* Free an AVFifoBuffer and reset pointer to NULL.
|
||||
* @param f AVFifoBuffer to free
|
||||
*/
|
||||
void av_fifo_freep(AVFifoBuffer **f);
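/*
 * A minimal allocation sketch using only functions declared in this header:
 * room for 1024 float elements, queried and then released again.
 * @code
 * AVFifoBuffer *f = av_fifo_alloc_array(1024, sizeof(float));
 * if (f) {
 *     int free_bytes = av_fifo_space(f);   // 1024 * sizeof(float) right after allocation
 *     av_fifo_reset(f);                    // still empty, so this is a no-op here
 *     av_fifo_freep(&f);                   // frees the FIFO and sets f to NULL
 * }
 * @endcode
 */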
|
||||
|
||||
/**
|
||||
* Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
|
||||
* @param f AVFifoBuffer to reset
|
||||
@@ -59,7 +73,7 @@ void av_fifo_reset(AVFifoBuffer *f);
|
||||
* @param f AVFifoBuffer to read from
|
||||
* @return size
|
||||
*/
|
||||
int av_fifo_size(AVFifoBuffer *f);
|
||||
int av_fifo_size(FF_CONST_AVUTIL53 AVFifoBuffer *f);
|
||||
|
||||
/**
|
||||
* Return the amount of space in bytes in the AVFifoBuffer, that is the
|
||||
@@ -67,7 +81,7 @@ int av_fifo_size(AVFifoBuffer *f);
|
||||
* @param f AVFifoBuffer to write into
|
||||
* @return size
|
||||
*/
|
||||
int av_fifo_space(AVFifoBuffer *f);
|
||||
int av_fifo_space(FF_CONST_AVUTIL53 AVFifoBuffer *f);
|
||||
|
||||
/**
|
||||
* Feed data from an AVFifoBuffer to a user-supplied callback.
|
||||
|
||||
@@ -33,32 +33,10 @@
|
||||
#include "dict.h"
|
||||
#include "rational.h"
|
||||
#include "samplefmt.h"
|
||||
#include "pixfmt.h"
|
||||
#include "version.h"
|
||||
|
||||
|
||||
enum AVColorSpace{
|
||||
AVCOL_SPC_RGB = 0,
|
||||
AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
|
||||
AVCOL_SPC_UNSPECIFIED = 2,
|
||||
AVCOL_SPC_FCC = 4,
|
||||
AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
|
||||
AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
|
||||
AVCOL_SPC_SMPTE240M = 7,
|
||||
AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
|
||||
AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
|
||||
AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
|
||||
AVCOL_SPC_NB , ///< Not part of ABI
|
||||
};
|
||||
#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG
|
||||
|
||||
enum AVColorRange{
|
||||
AVCOL_RANGE_UNSPECIFIED = 0,
|
||||
AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
|
||||
AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
|
||||
AVCOL_RANGE_NB , ///< Not part of ABI
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* @defgroup lavu_frame AVFrame
|
||||
* @ingroup lavu_data
|
||||
@@ -92,6 +70,18 @@ enum AVFrameSideDataType {
|
||||
* The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
|
||||
*/
|
||||
AV_FRAME_DATA_DOWNMIX_INFO,
|
||||
/**
|
||||
* ReplayGain information in the form of the AVReplayGain struct.
|
||||
*/
|
||||
AV_FRAME_DATA_REPLAYGAIN,
|
||||
/**
|
||||
* This side data contains a 3x3 transformation matrix describing an affine
|
||||
* transformation that needs to be applied to the frame for correct
|
||||
* presentation.
|
||||
*
|
||||
* See libavutil/display.h for a detailed description of the data.
|
||||
*/
|
||||
AV_FRAME_DATA_DISPLAYMATRIX,
|
||||
};
|
||||
|
||||
typedef struct AVFrameSideData {
|
||||
@@ -224,7 +214,7 @@ typedef struct AVFrame {
|
||||
int64_t pkt_pts;
|
||||
|
||||
/**
|
||||
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used)
|
||||
* DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
|
||||
* This is also the Presentation time of this AVFrame calculated from
|
||||
* only AVPacket.dts values without pts values.
|
||||
*/
|
||||
@@ -279,7 +269,6 @@ typedef struct AVFrame {
|
||||
* motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
|
||||
* @endcode
|
||||
*/
|
||||
attribute_deprecated
|
||||
int16_t (*motion_val[2])[2];
|
||||
|
||||
/**
|
||||
@@ -376,7 +365,6 @@ typedef struct AVFrame {
|
||||
* log2 of the size of the block which a single vector in motion_val represents:
|
||||
* (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
|
||||
*/
|
||||
attribute_deprecated
|
||||
uint8_t motion_subsample_log2;
|
||||
#endif
|
||||
|
||||
@@ -443,6 +431,32 @@ typedef struct AVFrame {
|
||||
*/
|
||||
int flags;
|
||||
|
||||
#if FF_API_AVFRAME_COLORSPACE
|
||||
/**
|
||||
* MPEG vs JPEG YUV range.
|
||||
* It must be accessed using av_frame_get_color_range() and
|
||||
* av_frame_set_color_range().
|
||||
* - encoding: Set by user
|
||||
* - decoding: Set by libavcodec
|
||||
*/
|
||||
enum AVColorRange color_range;
|
||||
|
||||
enum AVColorPrimaries color_primaries;
|
||||
|
||||
enum AVColorTransferCharacteristic color_trc;
|
||||
|
||||
/**
|
||||
* YUV colorspace type.
|
||||
* It must be accessed using av_frame_get_colorspace() and
|
||||
* av_frame_set_colorspace().
|
||||
* - encoding: Set by user
|
||||
* - decoding: Set by libavcodec
|
||||
*/
|
||||
enum AVColorSpace colorspace;
|
||||
|
||||
enum AVChromaLocation chroma_location;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* frame timestamp estimated using various heuristics, in stream time base
|
||||
* Code outside libavcodec should access this field using:
|
||||
@@ -512,25 +526,6 @@ typedef struct AVFrame {
|
||||
*/
|
||||
int pkt_size;
|
||||
|
||||
/**
|
||||
* YUV colorspace type.
|
||||
* It must be accessed using av_frame_get_colorspace() and
|
||||
* av_frame_set_colorspace().
|
||||
* - encoding: Set by user
|
||||
* - decoding: Set by libavcodec
|
||||
*/
|
||||
enum AVColorSpace colorspace;
|
||||
|
||||
/**
|
||||
* MPEG vs JPEG YUV range.
|
||||
* It must be accessed using av_frame_get_color_range() and
|
||||
* av_frame_set_color_range().
|
||||
* - encoding: Set by user
|
||||
* - decoding: Set by libavcodec
|
||||
*/
|
||||
enum AVColorRange color_range;
|
||||
|
||||
|
||||
/**
|
||||
* Not to be accessed directly from outside libavutil
|
||||
*/
|
||||
@@ -726,6 +721,12 @@ AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
|
||||
AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
|
||||
enum AVFrameSideDataType type);
|
||||
|
||||
/**
|
||||
* If side data of the supplied type exists in the frame, free it and remove it
|
||||
* from the frame.
|
||||
*/
|
||||
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
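/*
 * A minimal sketch that reads the display-matrix side data described above and
 * strips it once it has been applied (av_display_rotation_get() comes from
 * libavutil/display.h).
 * @code
 * AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
 * if (sd) {
 *     double angle = av_display_rotation_get((const int32_t *)sd->data);
 *     // ... rotate the picture by "angle" degrees, then drop the side data
 *     av_frame_remove_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
 * }
 * @endcode
 */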
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
||||
libs/FFmpeg/include/libavutil/hash.h (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVUTIL_HASH_H
|
||||
#define AVUTIL_HASH_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
struct AVHashContext;
|
||||
|
||||
/**
|
||||
* Allocate a hash context for the algorithm specified by name.
|
||||
*
|
||||
* @return >= 0 for success, a negative error code for failure
|
||||
* @note The context is not initialized, you must call av_hash_init().
|
||||
*/
|
||||
int av_hash_alloc(struct AVHashContext **ctx, const char *name);
|
||||
|
||||
/**
|
||||
* Get the names of available hash algorithms.
|
||||
*
|
||||
* This function can be used to enumerate the algorithms.
|
||||
*
|
||||
* @param i index of the hash algorithm, starting from 0
|
||||
* @return a pointer to a static string or NULL if i is out of range
|
||||
*/
|
||||
const char *av_hash_names(int i);
|
||||
|
||||
/**
|
||||
* Get the name of the algorithm corresponding to the given hash context.
|
||||
*/
|
||||
const char *av_hash_get_name(const struct AVHashContext *ctx);
|
||||
|
||||
/**
|
||||
* Maximum value that av_hash_get_size will currently return.
|
||||
*
|
||||
* You can use this if you absolutely want or need to use static allocation
|
||||
* and are fine with not supporting hashes newly added to libavutil without
|
||||
* recompilation.
|
||||
* Note that you still need to check against av_hash_get_size(); adding new hashes
* with larger sizes will not be considered an ABI change and should not cause
* your code to overflow a buffer.
|
||||
*/
|
||||
#define AV_HASH_MAX_SIZE 64
|
||||
|
||||
/**
|
||||
* Get the size of the resulting hash value in bytes.
|
||||
*
|
||||
* The pointer passed to av_hash_final() must have space for at least this many bytes.
|
||||
*/
|
||||
int av_hash_get_size(const struct AVHashContext *ctx);
|
||||
|
||||
/**
|
||||
* Initialize or reset a hash context.
|
||||
*/
|
||||
void av_hash_init(struct AVHashContext *ctx);
|
||||
|
||||
/**
|
||||
* Update a hash context with additional data.
|
||||
*/
|
||||
void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);
|
||||
|
||||
/**
|
||||
* Finalize a hash context and compute the actual hash value.
|
||||
*/
|
||||
void av_hash_final(struct AVHashContext *ctx, uint8_t *dst);
|
||||
|
||||
/**
|
||||
* Finalize a hash context and compute the actual hash value.
|
||||
* If size is smaller than the hash size, the hash is truncated;
|
||||
* if size is larger, the buffer is padded with 0.
|
||||
*/
|
||||
void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size);
|
||||
|
||||
/**
|
||||
* Finalize a hash context and compute the actual hash value as a hex string.
|
||||
* The string is always 0-terminated.
|
||||
* If size is smaller than 2 * hash_size + 1, the hex string is truncated.
|
||||
*/
|
||||
void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size);
|
||||
|
||||
/**
|
||||
* Finalize a hash context and compute the actual hash value as a base64 string.
|
||||
* The string is always 0-terminated.
|
||||
* If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is
|
||||
* truncated.
|
||||
*/
|
||||
void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size);
|
||||
|
||||
/**
|
||||
* Free hash context.
|
||||
*/
|
||||
void av_hash_freep(struct AVHashContext **ctx);
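/*
 * A minimal sketch of the alloc/init/update/final life cycle, assuming the
 * "SHA256" algorithm name is available in this build; "buf"/"buf_len" stand in
 * for the caller's data.
 * @code
 * struct AVHashContext *ctx = NULL;
 * char hex[2 * AV_HASH_MAX_SIZE + 1];
 * if (av_hash_alloc(&ctx, "SHA256") >= 0) {
 *     av_hash_init(ctx);
 *     av_hash_update(ctx, buf, buf_len);
 *     av_hash_final_hex(ctx, (uint8_t *)hex, sizeof(hex));
 *     av_hash_freep(&ctx);
 * }
 * @endcode
 */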
|
||||
|
||||
#endif /* AVUTIL_HASH_H */
|
||||
@@ -29,6 +29,7 @@
|
||||
|
||||
#include "avutil.h"
|
||||
#include "pixdesc.h"
|
||||
#include "rational.h"
|
||||
|
||||
/**
|
||||
* Compute the max pixel step for each plane of an image with a
|
||||
@@ -190,6 +191,20 @@ int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
|
||||
*/
|
||||
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
|
||||
|
||||
/**
|
||||
* Check if the given sample aspect ratio of an image is valid.
|
||||
*
|
||||
* It is considered invalid if the denominator is 0 or if applying the ratio
|
||||
* to the image size would make the smaller dimension less than 1. If the
|
||||
* sar numerator is 0, it is considered unknown and will return as valid.
|
||||
*
|
||||
* @param w width of the image
|
||||
* @param h height of the image
|
||||
* @param sar sample aspect ratio of the image
|
||||
* @return 0 if valid, a negative AVERROR code otherwise
|
||||
*/
|
||||
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar);
|
||||
|
||||
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
|
||||
|
||||
/**
|
||||
|
||||
@@ -37,6 +37,12 @@ typedef enum {
|
||||
AV_CLASS_CATEGORY_BITSTREAM_FILTER,
|
||||
AV_CLASS_CATEGORY_SWSCALER,
|
||||
AV_CLASS_CATEGORY_SWRESAMPLER,
|
||||
AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
|
||||
AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
|
||||
AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
|
||||
AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
|
||||
AV_CLASS_CATEGORY_DEVICE_OUTPUT,
|
||||
AV_CLASS_CATEGORY_DEVICE_INPUT,
|
||||
AV_CLASS_CATEGORY_NB, ///< not part of ABI/API
|
||||
}AVClassCategory;
|
||||
|
||||
@@ -185,6 +191,16 @@ typedef struct AVClass {
|
||||
* @}
|
||||
*/
|
||||
|
||||
/**
|
||||
* Sets additional colors for extended debugging sessions.
|
||||
* @code
|
||||
av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
|
||||
@endcode
|
||||
* Requires 256-color terminal support. Use outside of debugging sessions is not
* recommended.
|
||||
*/
|
||||
#define AV_LOG_C(x) (x << 8)
|
||||
|
||||
/**
|
||||
* Send the specified message to the log if the level is less than or equal
|
||||
* to the current av_log_level. By default, all logging messages are sent to
|
||||
@@ -305,7 +321,17 @@ void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
|
||||
* call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end
|
||||
*/
|
||||
#define AV_LOG_SKIP_REPEATED 1
|
||||
|
||||
/**
|
||||
* Include the log severity in messages originating from codecs.
|
||||
*
|
||||
* Results in messages such as:
|
||||
* [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
|
||||
*/
|
||||
#define AV_LOG_PRINT_LEVEL 2
|
||||
|
||||
void av_log_set_flags(int arg);
|
||||
int av_log_get_flags(void);
|
||||
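A short sketch combining the two flags and the AV_LOG_C() helper documented above; the color value 134 is only an example of a 256-color index:

    /* Collapse repeated messages and prefix codec messages with their severity. */
    av_log_set_flags(AV_LOG_SKIP_REPEATED | AV_LOG_PRINT_LEVEL);

    /* Colored output is meant for debugging sessions only, as noted above. */
    av_log(NULL, AV_LOG_DEBUG | AV_LOG_C(134), "Message in purple\n");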
|
||||
/**
|
||||
* @}
|
||||
|
||||
@@ -146,7 +146,7 @@ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int
|
||||
/**
|
||||
* Add a value to a timestamp.
|
||||
*
|
||||
- * This function gurantees that when the same value is repeatly added that
+ * This function guarantees that when the same value is repeatly added that
|
||||
* no accumulation of rounding errors occurs.
|
||||
*
|
||||
* @param ts Input timestamp
|
||||
|
||||
@@ -276,10 +276,25 @@ void av_freep(void *ptr);
|
||||
* @param tab_ptr pointer to the array to grow
|
||||
* @param nb_ptr pointer to the number of elements in the array
|
||||
* @param elem element to add
|
||||
- * @see av_dynarray2_add()
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
|
||||
*/
|
||||
void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
|
||||
|
||||
/**
|
||||
* Add an element to a dynamic array.
|
||||
*
|
||||
 * Function has the same functionality as av_dynarray_add(),
 * but it doesn't free memory on failure. It returns the error code
 * instead and leaves the current buffer untouched.
|
||||
*
|
||||
* @param tab_ptr pointer to the array to grow
|
||||
* @param nb_ptr pointer to the number of elements in the array
|
||||
* @param elem element to add
|
||||
* @return >=0 on success, negative otherwise.
|
||||
* @see av_dynarray_add(), av_dynarray2_add()
|
||||
*/
|
||||
int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
|
||||
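A minimal sketch of the no-free variant; av_strdup()/av_free() from libavutil/mem.h are assumed, and the string content is illustrative:

    char **items    = NULL;
    int    nb_items = 0;
    char  *entry    = av_strdup("example");

    if (!entry || av_dynarray_add_nofree(&items, &nb_items, entry) < 0) {
        /* On failure the existing array is left untouched, so only the new
         * element has to be released by the caller. */
        av_free(entry);
    }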
|
||||
/**
|
||||
* Add an element of size elem_size to a dynamic array.
|
||||
*
|
||||
@@ -299,7 +314,7 @@ void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
|
||||
* the new added element is not filled.
|
||||
* @return pointer to the data of the element to copy in the new allocated space.
|
||||
 * If NULL, the new allocated space is left uninitialized.
|
||||
- * @see av_dynarray_add()
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
|
||||
*/
|
||||
void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
|
||||
const uint8_t *elem_data);
|
||||
|
||||
@@ -313,17 +313,67 @@ typedef struct AVOption {
|
||||
*/
|
||||
typedef struct AVOptionRange {
|
||||
const char *str;
|
||||
double value_min, value_max; ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count
|
||||
double component_min, component_max; ///< For string this represents the unicode range for chars, 0-127 limits to ASCII
|
||||
int is_range; ///< if set to 1 the struct encodes a range, if set to 0 a single value
|
||||
/**
|
||||
* Value range.
|
||||
* For string ranges this represents the min/max length.
|
||||
* For dimensions this represents the min/max pixel count or width/height in multi-component case.
|
||||
*/
|
||||
double value_min, value_max;
|
||||
/**
|
||||
* Value's component range.
|
||||
* For string this represents the unicode range for chars, 0-127 limits to ASCII.
|
||||
*/
|
||||
double component_min, component_max;
|
||||
/**
|
||||
* Range flag.
|
||||
* If set to 1 the struct encodes a range, if set to 0 a single value.
|
||||
*/
|
||||
int is_range;
|
||||
} AVOptionRange;
|
||||
|
||||
/**
|
||||
- * List of AVOptionRange structs
+ * List of AVOptionRange structs.
|
||||
*/
|
||||
typedef struct AVOptionRanges {
|
||||
/**
|
||||
* Array of option ranges.
|
||||
*
|
||||
 * Most option types use just one component.
 * The following describes multi-component option types:
|
||||
*
|
||||
* AV_OPT_TYPE_IMAGE_SIZE:
|
||||
* component index 0: range of pixel count (width * height).
|
||||
* component index 1: range of width.
|
||||
* component index 2: range of height.
|
||||
*
|
||||
* @note To obtain multi-component version of this structure, user must
|
||||
* provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or
|
||||
* av_opt_query_ranges_default function.
|
||||
*
|
||||
* Multi-component range can be read as in following example:
|
||||
*
|
||||
* @code
|
||||
* int range_index, component_index;
|
||||
* AVOptionRanges *ranges;
|
||||
* AVOptionRange *range[3]; //may require more than 3 in the future.
|
||||
* av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);
|
||||
* for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {
|
||||
* for (component_index = 0; component_index < ranges->nb_components; component_index++)
|
||||
* range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];
|
||||
* //do something with range here.
|
||||
* }
|
||||
* av_opt_freep_ranges(&ranges);
|
||||
* @endcode
|
||||
*/
|
||||
AVOptionRange **range;
|
||||
/**
|
||||
* Number of ranges per component.
|
||||
*/
|
||||
int nb_ranges;
|
||||
/**
|
||||
 * Number of components.
|
||||
*/
|
||||
int nb_components;
|
||||
} AVOptionRanges;
|
||||
|
||||
|
||||
@@ -491,6 +541,24 @@ int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name)
|
||||
*/
|
||||
int av_opt_set_dict(void *obj, struct AVDictionary **options);
|
||||
|
||||
|
||||
/**
|
||||
* Set all the options from a given dictionary on an object.
|
||||
*
|
||||
* @param obj a struct whose first element is a pointer to AVClass
|
||||
* @param options options to process. This dictionary will be freed and replaced
|
||||
* by a new one containing all options not found in obj.
|
||||
* Of course this new dictionary needs to be freed by caller
|
||||
* with av_dict_free().
|
||||
* @param search_flags A combination of AV_OPT_SEARCH_*.
|
||||
*
|
||||
* @return 0 on success, a negative AVERROR if some option was found in obj,
|
||||
* but could not be set.
|
||||
*
|
||||
* @see av_dict_copy()
|
||||
*/
|
||||
int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags);
|
||||
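A sketch of the dictionary-based option setter; 'ctx' stands for any AVOptions-enabled struct (hypothetical here), "threads" is only an example option, and AV_OPT_SEARCH_CHILDREN is one of the AV_OPT_SEARCH_* flags:

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "threads", "4", 0);

    if (av_opt_set_dict2(ctx, &opts, AV_OPT_SEARCH_CHILDREN) < 0) {
        /* an option was found on ctx but could not be set */
    }
    /* opts now contains only the entries that were not found on ctx. */
    av_dict_free(&opts);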
|
||||
/**
|
||||
* Extract a key-value pair from the beginning of a string.
|
||||
*
|
||||
@@ -558,6 +626,13 @@ int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational
|
||||
*/
|
||||
#define AV_OPT_SEARCH_FAKE_OBJ 0x0002
|
||||
|
||||
/**
|
||||
* Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than
|
||||
* one component for certain option types.
|
||||
* @see AVOptionRanges for details.
|
||||
*/
|
||||
#define AV_OPT_MULTI_COMPONENT_RANGE 0x1000
|
||||
|
||||
/**
|
||||
* Look for an option in an object. Consider only options which
|
||||
* have all the specified flags set.
|
||||
@@ -739,13 +814,16 @@ void av_opt_freep_ranges(AVOptionRanges **ranges);
|
||||
*
|
||||
* @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
|
||||
* AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
|
||||
* AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
|
||||
*
|
||||
* The result must be freed with av_opt_freep_ranges.
|
||||
*
|
||||
- * @return >= 0 on success, a negative errro code otherwise
+ * @return number of components returned on success, a negative error code otherwise
|
||||
*/
|
||||
int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);
|
||||
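A single-component query sketch; 'obj' is a hypothetical AVOptions-enabled object and "b" a hypothetical option name:

    AVOptionRanges *ranges = NULL;

    if (av_opt_query_ranges(&ranges, obj, "b", 0) >= 0) {
        if (ranges->nb_ranges > 0)
            av_log(NULL, AV_LOG_INFO, "min=%g max=%g\n",
                   ranges->range[0]->value_min, ranges->range[0]->value_max);
        av_opt_freep_ranges(&ranges);
    }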
|
||||
int av_opt_copy(void *dest, void *src);
|
||||
|
||||
/**
|
||||
* Get a default list of allowed ranges for the given option.
|
||||
*
|
||||
@@ -754,10 +832,11 @@ int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags
|
||||
*
|
||||
* @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
|
||||
* AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance
|
||||
* AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
|
||||
*
|
||||
* The result must be freed with av_opt_free_ranges.
|
||||
*
|
||||
- * @return >= 0 on success, a negative errro code otherwise
+ * @return number of components returned on success, a negative error code otherwise
|
||||
*/
|
||||
int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);
|
||||
|
||||
|
||||
@@ -255,9 +255,9 @@ enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
|
||||
* Utility function to access log2_chroma_w log2_chroma_h from
|
||||
* the pixel format AVPixFmtDescriptor.
|
||||
*
|
||||
- * See avcodec_get_chroma_sub_sample() for a function that asserts a
+ * See av_get_chroma_sub_sample() for a function that asserts a
 * valid pixel format instead of returning an error code.
- * Its recommanded that you use avcodec_get_chroma_sub_sample unless
+ * It is recommended that you use avcodec_get_chroma_sub_sample unless
|
||||
* you do check the return code!
|
||||
*
|
||||
* @param[in] pix_fmt the pixel format
|
||||
@@ -287,5 +287,53 @@ void ff_check_pixfmt_descriptors(void);
|
||||
*/
|
||||
enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
|
||||
|
||||
#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
|
||||
#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
|
||||
#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
|
||||
#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
|
||||
#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
|
||||
#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
|
||||
|
||||
/**
|
||||
* Compute what kind of losses will occur when converting from one specific
|
||||
* pixel format to another.
|
||||
* When converting from one pixel format to another, information loss may occur.
|
||||
* For example, when converting from RGB24 to GRAY, the color information will
|
||||
* be lost. Similarly, other losses occur when converting from some formats to
|
||||
* other formats. These losses can involve loss of chroma, but also loss of
|
||||
* resolution, loss of color depth, loss due to the color space conversion, loss
|
||||
* of the alpha bits or loss due to color quantization.
|
||||
 * av_get_pix_fmt_loss() informs you about the various types of losses
|
||||
* which will occur when converting from one pixel format to another.
|
||||
*
|
||||
* @param[in] dst_pix_fmt destination pixel format
|
||||
* @param[in] src_pix_fmt source pixel format
|
||||
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
|
||||
* @return Combination of flags informing you what kind of losses will occur
|
||||
* (maximum loss for an invalid dst_pix_fmt).
|
||||
*/
|
||||
int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
|
||||
enum AVPixelFormat src_pix_fmt,
|
||||
int has_alpha);
|
||||
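A short sketch of interpreting the returned flag mask; the format pair is illustrative:

    int loss = av_get_pix_fmt_loss(AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGB24, 0);

    if (loss & FF_LOSS_CHROMA)
        av_log(NULL, AV_LOG_INFO, "conversion to gray drops chroma\n");
    if (loss & FF_LOSS_DEPTH)
        av_log(NULL, AV_LOG_INFO, "conversion reduces color depth\n");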
|
||||
/**
|
||||
* Compute what kind of losses will occur when converting from one specific
|
||||
* pixel format to another.
|
||||
* When converting from one pixel format to another, information loss may occur.
|
||||
* For example, when converting from RGB24 to GRAY, the color information will
|
||||
* be lost. Similarly, other losses occur when converting from some formats to
|
||||
* other formats. These losses can involve loss of chroma, but also loss of
|
||||
* resolution, loss of color depth, loss due to the color space conversion, loss
|
||||
* of the alpha bits or loss due to color quantization.
|
||||
 * av_get_pix_fmt_loss() informs you about the various types of losses
|
||||
* which will occur when converting from one pixel format to another.
|
||||
*
|
||||
* @param[in] dst_pix_fmt destination pixel format
|
||||
* @param[in] src_pix_fmt source pixel format
|
||||
* @param[in] has_alpha Whether the source pixel format alpha channel is used.
|
||||
* @return Combination of flags informing you what kind of losses will occur
|
||||
* (maximum loss for an invalid dst_pix_fmt).
|
||||
*/
|
||||
enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
|
||||
enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
|
||||
#endif /* AVUTIL_PIXDESC_H */
|
||||
|
||||
@@ -217,6 +217,21 @@ enum AVPixelFormat {
|
||||
AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
|
||||
AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
|
||||
|
||||
/**
|
||||
* duplicated pixel formats for compatibility with libav.
|
||||
* FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)
|
||||
* also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028
|
||||
* Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)
|
||||
*/
|
||||
AV_PIX_FMT_RGBA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
|
||||
AV_PIX_FMT_RGBA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
|
||||
AV_PIX_FMT_BGRA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
|
||||
AV_PIX_FMT_BGRA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
|
||||
|
||||
AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
|
||||
|
||||
AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef
|
||||
|
||||
#ifndef AV_PIX_FMT_ABI_GIT_MASTER
|
||||
AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
|
||||
AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
|
||||
@@ -277,6 +292,10 @@ enum AVPixelFormat {
|
||||
#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
|
||||
#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV
|
||||
#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV
|
||||
#define AV_PIX_FMT_RGBA64BE AV_PIX_FMT_RGBA64BE_LIBAV
|
||||
#define AV_PIX_FMT_RGBA64LE AV_PIX_FMT_RGBA64LE_LIBAV
|
||||
#define AV_PIX_FMT_BGRA64BE AV_PIX_FMT_BGRA64BE_LIBAV
|
||||
#define AV_PIX_FMT_BGRA64LE AV_PIX_FMT_BGRA64LE_LIBAV
|
||||
#endif
|
||||
|
||||
|
||||
@@ -301,10 +320,12 @@ enum AVPixelFormat {
|
||||
#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
|
||||
#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
|
||||
#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
|
||||
#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
|
||||
#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
|
||||
#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
|
||||
#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
|
||||
#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
|
||||
#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
|
||||
|
||||
#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
|
||||
#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
|
||||
@@ -322,8 +343,6 @@ enum AVPixelFormat {
|
||||
#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
|
||||
#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
|
||||
|
||||
#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
|
||||
#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
|
||||
#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
|
||||
#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
|
||||
#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
|
||||
@@ -350,6 +369,7 @@ enum AVPixelFormat {
|
||||
#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
|
||||
#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
|
||||
|
||||
|
||||
#if FF_API_PIX_FMT
|
||||
#define PixelFormat AVPixelFormat
|
||||
|
||||
@@ -400,4 +420,90 @@ enum AVPixelFormat {
|
||||
#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Chromaticity coordinates of the source primaries.
|
||||
*/
|
||||
enum AVColorPrimaries {
|
||||
AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
|
||||
AVCOL_PRI_UNSPECIFIED = 2,
|
||||
AVCOL_PRI_RESERVED = 3,
|
||||
AVCOL_PRI_BT470M = 4,
|
||||
AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
|
||||
AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
|
||||
AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
|
||||
AVCOL_PRI_FILM = 8,
|
||||
AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
|
||||
AVCOL_PRI_NB, ///< Not part of ABI
|
||||
};
|
||||
|
||||
/**
|
||||
* Color Transfer Characteristic.
|
||||
*/
|
||||
enum AVColorTransferCharacteristic {
|
||||
AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
|
||||
AVCOL_TRC_UNSPECIFIED = 2,
|
||||
AVCOL_TRC_RESERVED = 3,
|
||||
AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
|
||||
AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
|
||||
AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
|
||||
AVCOL_TRC_SMPTE240M = 7,
|
||||
AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
|
||||
AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
|
||||
AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
|
||||
AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
|
||||
AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
|
||||
AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
|
||||
AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system
|
||||
AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system
|
||||
AVCOL_TRC_NB, ///< Not part of ABI
|
||||
};
|
||||
|
||||
/**
|
||||
* YUV colorspace type.
|
||||
*/
|
||||
enum AVColorSpace {
|
||||
AVCOL_SPC_RGB = 0,
|
||||
AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
|
||||
AVCOL_SPC_UNSPECIFIED = 2,
|
||||
AVCOL_SPC_RESERVED = 3,
|
||||
AVCOL_SPC_FCC = 4,
|
||||
AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
|
||||
AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
|
||||
AVCOL_SPC_SMPTE240M = 7,
|
||||
AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
|
||||
AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
|
||||
AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
|
||||
AVCOL_SPC_NB, ///< Not part of ABI
|
||||
};
|
||||
#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG
|
||||
|
||||
|
||||
/**
|
||||
* MPEG vs JPEG YUV range.
|
||||
*/
|
||||
enum AVColorRange {
|
||||
AVCOL_RANGE_UNSPECIFIED = 0,
|
||||
AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
|
||||
AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
|
||||
AVCOL_RANGE_NB, ///< Not part of ABI
|
||||
};
|
||||
|
||||
/**
|
||||
* Location of chroma samples.
|
||||
*
|
||||
* X X 3 4 X X are luma samples,
|
||||
* 1 2 1-6 are possible chroma positions
|
||||
* X X 5 6 X 0 is undefined/unknown position
|
||||
*/
|
||||
enum AVChromaLocation {
|
||||
AVCHROMA_LOC_UNSPECIFIED = 0,
|
||||
AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
|
||||
AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
|
||||
AVCHROMA_LOC_TOPLEFT = 3, ///< DV
|
||||
AVCHROMA_LOC_TOP = 4,
|
||||
AVCHROMA_LOC_BOTTOMLEFT = 5,
|
||||
AVCHROMA_LOC_BOTTOM = 6,
|
||||
AVCHROMA_LOC_NB, ///< Not part of ABI
|
||||
};
|
||||
|
||||
#endif /* AVUTIL_PIXFMT_H */
|
||||
|
||||
libs/FFmpeg/include/libavutil/replaygain.h (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
/*
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVUTIL_REPLAYGAIN_H
|
||||
#define AVUTIL_REPLAYGAIN_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
/**
|
||||
* ReplayGain information (see
|
||||
* http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification).
|
||||
* The size of this struct is a part of the public ABI.
|
||||
*/
|
||||
typedef struct AVReplayGain {
|
||||
/**
|
||||
* Track replay gain in microbels (divide by 100000 to get the value in dB).
|
||||
* Should be set to INT32_MIN when unknown.
|
||||
*/
|
||||
int32_t track_gain;
|
||||
/**
|
||||
* Peak track amplitude, with 100000 representing full scale (but values
|
||||
* may overflow). 0 when unknown.
|
||||
*/
|
||||
uint32_t track_peak;
|
||||
/**
|
||||
* Same as track_gain, but for the whole album.
|
||||
*/
|
||||
int32_t album_gain;
|
||||
/**
|
||||
 * Same as track_peak, but for the whole album.
|
||||
*/
|
||||
uint32_t album_peak;
|
||||
} AVReplayGain;
|
||||
|
||||
#endif /* AVUTIL_REPLAYGAIN_H */
|
||||
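A sketch of consuming the structure; how 'rg' is obtained (typically from stream or frame side data) is outside this hunk:

    if (rg->track_gain != INT32_MIN) {
        double gain_db = rg->track_gain / 100000.0;    /* microbels -> dB */
        av_log(NULL, AV_LOG_INFO, "track gain: %+.2f dB\n", gain_db);
    }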
@@ -25,26 +25,36 @@
|
||||
#include "attributes.h"
|
||||
|
||||
/**
|
||||
* Audio Sample Formats
|
||||
* @addtogroup lavu_audio
|
||||
* @{
|
||||
*
|
||||
* @defgroup lavu_sampfmts Audio sample formats
|
||||
*
|
||||
* Audio sample format enumeration and related convenience functions.
|
||||
* @{
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* Audio sample formats
|
||||
*
|
||||
* - The data described by the sample format is always in native-endian order.
|
||||
* Sample values can be expressed by native C types, hence the lack of a signed
|
||||
* 24-bit sample format even though it is a common raw audio data format.
|
||||
*
|
||||
* - The floating-point formats are based on full volume being in the range
|
||||
* [-1.0, 1.0]. Any values outside this range are beyond full volume level.
|
||||
*
|
||||
* - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
|
||||
* (such as AVFrame in libavcodec) is as follows:
|
||||
*
|
||||
* @par
|
||||
* The data described by the sample format is always in native-endian order.
|
||||
* Sample values can be expressed by native C types, hence the lack of a signed
|
||||
* 24-bit sample format even though it is a common raw audio data format.
|
||||
*
|
||||
* @par
|
||||
* The floating-point formats are based on full volume being in the range
|
||||
* [-1.0, 1.0]. Any values outside this range are beyond full volume level.
|
||||
*
|
||||
* @par
|
||||
* The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
|
||||
* (such as AVFrame in libavcodec) is as follows:
|
||||
*
|
||||
* For planar sample formats, each audio channel is in a separate data plane,
|
||||
* and linesize is the buffer size, in bytes, for a single plane. All data
|
||||
* planes must be the same size. For packed sample formats, only the first data
|
||||
* plane is used, and samples for each channel are interleaved. In this case,
|
||||
* linesize is the buffer size, in bytes, for the 1 plane.
|
||||
*
|
||||
*/
|
||||
enum AVSampleFormat {
|
||||
AV_SAMPLE_FMT_NONE = -1,
|
||||
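A sketch using av_samples_get_buffer_size() (declared in a later hunk of this file) to illustrate the planar layout described above; the channel and sample counts are arbitrary:

    int linesize;

    /* Planar float, stereo, 1024 samples: two planes, and linesize reports the
     * size of one plane. For a packed format such as AV_SAMPLE_FMT_S16, only a
     * single interleaved plane would exist and linesize would cover all of it. */
    int total = av_samples_get_buffer_size(&linesize, 2, 1024, AV_SAMPLE_FMT_FLTP, 0);
    if (total < 0)
        return total;   /* invalid parameters */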
@@ -157,6 +167,15 @@ int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
|
||||
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
|
||||
enum AVSampleFormat sample_fmt, int align);
|
||||
|
||||
/**
|
||||
* @}
|
||||
*
|
||||
* @defgroup lavu_sampmanip Samples manipulation
|
||||
*
|
||||
* Functions that manipulate audio samples
|
||||
* @{
|
||||
*/
|
||||
|
||||
/**
|
||||
* Fill plane data pointers and linesize for samples with sample
|
||||
* format sample_fmt.
|
||||
@@ -253,4 +272,8 @@ int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
|
||||
int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
|
||||
int nb_channels, enum AVSampleFormat sample_fmt);
|
||||
|
||||
/**
|
||||
* @}
|
||||
* @}
|
||||
*/
|
||||
#endif /* AVUTIL_SAMPLEFMT_H */
|
||||
|
||||
@@ -18,6 +18,9 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVUTIL_STEREO3D_H
|
||||
#define AVUTIL_STEREO3D_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "frame.h"
|
||||
@@ -145,3 +148,5 @@ AVStereo3D *av_stereo3d_alloc(void);
|
||||
* @return The AVStereo3D structure to be filled by caller.
|
||||
*/
|
||||
AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);
|
||||
|
||||
#endif /* AVUTIL_STEREO3D_H */
|
||||
|
||||
libs/FFmpeg/include/libavutil/threadmessage.h (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public License
|
||||
* as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVUTIL_THREADMESSAGE_H
|
||||
#define AVUTIL_THREADMESSAGE_H
|
||||
|
||||
typedef struct AVThreadMessageQueue AVThreadMessageQueue;
|
||||
|
||||
typedef enum AVThreadMessageFlags {
|
||||
|
||||
/**
|
||||
* Perform non-blocking operation.
|
||||
* If this flag is set, send and recv operations are non-blocking and
|
||||
* return AVERROR(EAGAIN) immediately if they can not proceed.
|
||||
*/
|
||||
AV_THREAD_MESSAGE_NONBLOCK = 1,
|
||||
|
||||
} AVThreadMessageFlags;
|
||||
|
||||
/**
|
||||
* Allocate a new message queue.
|
||||
*
|
||||
* @param mq pointer to the message queue
|
||||
* @param nelem maximum number of elements in the queue
|
||||
* @param elsize size of each element in the queue
|
||||
* @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if
|
||||
* lavu was built without thread support
|
||||
*/
|
||||
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
|
||||
unsigned nelem,
|
||||
unsigned elsize);
|
||||
|
||||
/**
|
||||
* Free a message queue.
|
||||
*
|
||||
* The message queue must no longer be in use by another thread.
|
||||
*/
|
||||
void av_thread_message_queue_free(AVThreadMessageQueue **mq);
|
||||
|
||||
/**
|
||||
* Send a message on the queue.
|
||||
*/
|
||||
int av_thread_message_queue_send(AVThreadMessageQueue *mq,
|
||||
void *msg,
|
||||
unsigned flags);
|
||||
|
||||
/**
|
||||
* Receive a message from the queue.
|
||||
*/
|
||||
int av_thread_message_queue_recv(AVThreadMessageQueue *mq,
|
||||
void *msg,
|
||||
unsigned flags);
|
||||
|
||||
/**
|
||||
* Set the sending error code.
|
||||
*
|
||||
* If the error code is set to non-zero, av_thread_message_queue_recv() will
|
||||
* return it immediately when there are no longer available messages.
|
||||
* Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used
|
||||
* to cause the receiving thread to stop or suspend its operation.
|
||||
*/
|
||||
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
|
||||
int err);
|
||||
|
||||
/**
|
||||
* Set the receiving error code.
|
||||
*
|
||||
* If the error code is set to non-zero, av_thread_message_queue_send() will
|
||||
* return it immediately. Conventional values, such as AVERROR_EOF or
|
||||
* AVERROR(EAGAIN), can be used to cause the sending thread to stop or
|
||||
* suspend its operation.
|
||||
*/
|
||||
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
|
||||
int err);
|
||||
|
||||
#endif /* AVUTIL_THREADMESSAGE_H */
|
||||
@@ -28,6 +28,21 @@
|
||||
*/
|
||||
int64_t av_gettime(void);
|
||||
|
||||
/**
|
||||
* Get the current time in microseconds since some unspecified starting point.
|
||||
 * On platforms that support it, the time comes from a monotonic clock.
 * This property makes this time source ideal for measuring relative time.
 * If a monotonic clock is not available on the targeted platform, the
 * implementation falls back to using av_gettime().
|
||||
*/
|
||||
int64_t av_gettime_relative(void);
|
||||
|
||||
/**
|
||||
* Indicates with a boolean result if the av_gettime_relative() time source
|
||||
* is monotonic.
|
||||
*/
|
||||
int av_gettime_relative_is_monotonic(void);
|
||||
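A sketch of the intended use for relative timing; the work being measured is a placeholder:

    int64_t t0 = av_gettime_relative();

    /* ... the operation being timed ... */

    int64_t elapsed_us = av_gettime_relative() - t0;   /* unaffected by wall-clock jumps
                                                          when the source is monotonic */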
|
||||
/**
|
||||
* Sleep for a period of time. Although the duration is expressed in
|
||||
* microseconds, the actual delay may be rounded to the precision of the
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
#include "macros.h"
|
||||
|
||||
/**
|
||||
- * @defgroup version_utils Library Version Macros
+ * @addtogroup version_utils
|
||||
*
|
||||
* Useful to check and match library version in order to maintain
|
||||
* backward compatibility.
|
||||
@@ -56,7 +56,7 @@
|
||||
*/
|
||||
|
||||
#define LIBAVUTIL_VERSION_MAJOR 52
|
||||
-#define LIBAVUTIL_VERSION_MINOR 66
+#define LIBAVUTIL_VERSION_MINOR 92
|
||||
#define LIBAVUTIL_VERSION_MICRO 100
|
||||
|
||||
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
|
||||
@@ -137,6 +137,18 @@
|
||||
#ifndef FF_API_OPT_TYPE_METADATA
|
||||
#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 54)
|
||||
#endif
|
||||
#ifndef FF_API_AVFRAME_COLORSPACE
|
||||
#define FF_API_AVFRAME_COLORSPACE (LIBAVUTIL_VERSION_MAJOR >= 52)
|
||||
#endif
|
||||
|
||||
|
||||
#ifndef FF_CONST_AVUTIL53
|
||||
#if LIBAVUTIL_VERSION_MAJOR >= 53
|
||||
#define FF_CONST_AVUTIL53 const
|
||||
#else
|
||||
#define FF_CONST_AVUTIL53
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @}
|
||||
|
||||
@@ -1,164 +0,0 @@
|
||||
#ifndef __AMF_H__
|
||||
#define __AMF_H__
|
||||
/*
|
||||
* Copyright (C) 2005-2008 Team XBMC
|
||||
* http://www.xbmc.org
|
||||
* Copyright (C) 2008-2009 Andrej Stepanchuk
|
||||
* Copyright (C) 2009-2010 Howard Chu
|
||||
*
|
||||
* This file is part of librtmp.
|
||||
*
|
||||
* librtmp is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Lesser General Public License as
|
||||
* published by the Free Software Foundation; either version 2.1,
|
||||
* or (at your option) any later version.
|
||||
*
|
||||
* librtmp is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with librtmp see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
|
||||
* Boston, MA 02110-1301, USA.
|
||||
* http://www.gnu.org/copyleft/lgpl.html
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE 1
|
||||
#define FALSE 0
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
typedef enum
|
||||
{ AMF_NUMBER = 0, AMF_BOOLEAN, AMF_STRING, AMF_OBJECT,
|
||||
AMF_MOVIECLIP, /* reserved, not used */
|
||||
AMF_NULL, AMF_UNDEFINED, AMF_REFERENCE, AMF_ECMA_ARRAY, AMF_OBJECT_END,
|
||||
AMF_STRICT_ARRAY, AMF_DATE, AMF_LONG_STRING, AMF_UNSUPPORTED,
|
||||
AMF_RECORDSET, /* reserved, not used */
|
||||
AMF_XML_DOC, AMF_TYPED_OBJECT,
|
||||
AMF_AVMPLUS, /* switch to AMF3 */
|
||||
AMF_INVALID = 0xff
|
||||
} AMFDataType;
|
||||
|
||||
typedef enum
|
||||
{ AMF3_UNDEFINED = 0, AMF3_NULL, AMF3_FALSE, AMF3_TRUE,
|
||||
AMF3_INTEGER, AMF3_DOUBLE, AMF3_STRING, AMF3_XML_DOC, AMF3_DATE,
|
||||
AMF3_ARRAY, AMF3_OBJECT, AMF3_XML, AMF3_BYTE_ARRAY
|
||||
} AMF3DataType;
|
||||
|
||||
typedef struct AVal
|
||||
{
|
||||
char *av_val;
|
||||
int av_len;
|
||||
} AVal;
|
||||
#define AVC(str) {str,sizeof(str)-1}
|
||||
#define AVMATCH(a1,a2) ((a1)->av_len == (a2)->av_len && !memcmp((a1)->av_val,(a2)->av_val,(a1)->av_len))
|
||||
|
||||
struct AMFObjectProperty;
|
||||
|
||||
typedef struct AMFObject
|
||||
{
|
||||
int o_num;
|
||||
struct AMFObjectProperty *o_props;
|
||||
} AMFObject;
|
||||
|
||||
typedef struct AMFObjectProperty
|
||||
{
|
||||
AVal p_name;
|
||||
AMFDataType p_type;
|
||||
union
|
||||
{
|
||||
double p_number;
|
||||
AVal p_aval;
|
||||
AMFObject p_object;
|
||||
} p_vu;
|
||||
int16_t p_UTCoffset;
|
||||
} AMFObjectProperty;
|
||||
|
||||
char *AMF_EncodeString(char *output, char *outend, const AVal * str);
|
||||
char *AMF_EncodeNumber(char *output, char *outend, double dVal);
|
||||
char *AMF_EncodeInt16(char *output, char *outend, short nVal);
|
||||
char *AMF_EncodeInt24(char *output, char *outend, int nVal);
|
||||
char *AMF_EncodeInt32(char *output, char *outend, int nVal);
|
||||
char *AMF_EncodeBoolean(char *output, char *outend, int bVal);
|
||||
|
||||
/* Shortcuts for AMFProp_Encode */
|
||||
char *AMF_EncodeNamedString(char *output, char *outend, const AVal * name, const AVal * value);
|
||||
char *AMF_EncodeNamedNumber(char *output, char *outend, const AVal * name, double dVal);
|
||||
char *AMF_EncodeNamedBoolean(char *output, char *outend, const AVal * name, int bVal);
|
||||
|
||||
unsigned short AMF_DecodeInt16(const char *data);
|
||||
unsigned int AMF_DecodeInt24(const char *data);
|
||||
unsigned int AMF_DecodeInt32(const char *data);
|
||||
void AMF_DecodeString(const char *data, AVal * str);
|
||||
void AMF_DecodeLongString(const char *data, AVal * str);
|
||||
int AMF_DecodeBoolean(const char *data);
|
||||
double AMF_DecodeNumber(const char *data);
|
||||
|
||||
char *AMF_Encode(AMFObject * obj, char *pBuffer, char *pBufEnd);
|
||||
char *AMF_EncodeEcmaArray(AMFObject *obj, char *pBuffer, char *pBufEnd);
|
||||
char *AMF_EncodeArray(AMFObject *obj, char *pBuffer, char *pBufEnd);
|
||||
|
||||
int AMF_Decode(AMFObject * obj, const char *pBuffer, int nSize,
|
||||
int bDecodeName);
|
||||
int AMF_DecodeArray(AMFObject * obj, const char *pBuffer, int nSize,
|
||||
int nArrayLen, int bDecodeName);
|
||||
int AMF3_Decode(AMFObject * obj, const char *pBuffer, int nSize,
|
||||
int bDecodeName);
|
||||
void AMF_Dump(AMFObject * obj);
|
||||
void AMF_Reset(AMFObject * obj);
|
||||
|
||||
void AMF_AddProp(AMFObject * obj, const AMFObjectProperty * prop);
|
||||
int AMF_CountProp(AMFObject * obj);
|
||||
AMFObjectProperty *AMF_GetProp(AMFObject * obj, const AVal * name,
|
||||
int nIndex);
|
||||
|
||||
AMFDataType AMFProp_GetType(AMFObjectProperty * prop);
|
||||
void AMFProp_SetNumber(AMFObjectProperty * prop, double dval);
|
||||
void AMFProp_SetBoolean(AMFObjectProperty * prop, int bflag);
|
||||
void AMFProp_SetString(AMFObjectProperty * prop, AVal * str);
|
||||
void AMFProp_SetObject(AMFObjectProperty * prop, AMFObject * obj);
|
||||
|
||||
void AMFProp_GetName(AMFObjectProperty * prop, AVal * name);
|
||||
void AMFProp_SetName(AMFObjectProperty * prop, AVal * name);
|
||||
double AMFProp_GetNumber(AMFObjectProperty * prop);
|
||||
int AMFProp_GetBoolean(AMFObjectProperty * prop);
|
||||
void AMFProp_GetString(AMFObjectProperty * prop, AVal * str);
|
||||
void AMFProp_GetObject(AMFObjectProperty * prop, AMFObject * obj);
|
||||
|
||||
int AMFProp_IsValid(AMFObjectProperty * prop);
|
||||
|
||||
char *AMFProp_Encode(AMFObjectProperty * prop, char *pBuffer, char *pBufEnd);
|
||||
int AMF3Prop_Decode(AMFObjectProperty * prop, const char *pBuffer,
|
||||
int nSize, int bDecodeName);
|
||||
int AMFProp_Decode(AMFObjectProperty * prop, const char *pBuffer,
|
||||
int nSize, int bDecodeName);
|
||||
|
||||
void AMFProp_Dump(AMFObjectProperty * prop);
|
||||
void AMFProp_Reset(AMFObjectProperty * prop);
|
||||
|
||||
typedef struct AMF3ClassDef
|
||||
{
|
||||
AVal cd_name;
|
||||
char cd_externalizable;
|
||||
char cd_dynamic;
|
||||
int cd_num;
|
||||
AVal *cd_props;
|
||||
} AMF3ClassDef;
|
||||
|
||||
void AMF3CD_AddProp(AMF3ClassDef * cd, AVal * prop);
|
||||
AVal *AMF3CD_GetProp(AMF3ClassDef * cd, int idx);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __AMF_H__ */
|
||||
@@ -1,47 +0,0 @@
|
||||
#ifndef __RTMP_HTTP_H__
|
||||
#define __RTMP_HTTP_H__
|
||||
/*
|
||||
* Copyright (C) 2010 Howard Chu
|
||||
* Copyright (C) 2010 Antti Ajanki
|
||||
*
|
||||
* This file is part of librtmp.
|
||||
*
|
||||
* librtmp is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Lesser General Public License as
|
||||
* published by the Free Software Foundation; either version 2.1,
|
||||
* or (at your option) any later version.
|
||||
*
|
||||
* librtmp is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with librtmp see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
|
||||
* Boston, MA 02110-1301, USA.
|
||||
* http://www.gnu.org/copyleft/lgpl.html
|
||||
*/
|
||||
|
||||
typedef enum {
|
||||
HTTPRES_OK, /* result OK */
|
||||
HTTPRES_OK_NOT_MODIFIED, /* not modified since last request */
|
||||
HTTPRES_NOT_FOUND, /* not found */
|
||||
HTTPRES_BAD_REQUEST, /* client error */
|
||||
HTTPRES_SERVER_ERROR, /* server reported an error */
|
||||
HTTPRES_REDIRECTED, /* resource has been moved */
|
||||
HTTPRES_LOST_CONNECTION /* connection lost while waiting for data */
|
||||
} HTTPResult;
|
||||
|
||||
struct HTTP_ctx {
|
||||
char *date;
|
||||
int size;
|
||||
int status;
|
||||
void *data;
|
||||
};
|
||||
|
||||
typedef size_t (HTTP_read_callback)(void *ptr, size_t size, size_t nmemb, void *stream);
|
||||
|
||||
HTTPResult HTTP_get(struct HTTP_ctx *http, const char *url, HTTP_read_callback *cb);
|
||||
|
||||
#endif
|
||||
@@ -1,69 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2008-2009 Andrej Stepanchuk
|
||||
* Copyright (C) 2009-2010 Howard Chu
|
||||
*
|
||||
* This file is part of librtmp.
|
||||
*
|
||||
* librtmp is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Lesser General Public License as
|
||||
* published by the Free Software Foundation; either version 2.1,
|
||||
* or (at your option) any later version.
|
||||
*
|
||||
* librtmp is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with librtmp see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
|
||||
* Boston, MA 02110-1301, USA.
|
||||
* http://www.gnu.org/copyleft/lgpl.html
|
||||
*/
|
||||
|
||||
#ifndef __RTMP_LOG_H__
|
||||
#define __RTMP_LOG_H__
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
/* Enable this to get full debugging output */
|
||||
/* #define _DEBUG */
|
||||
|
||||
#ifdef _DEBUG
|
||||
#undef NODEBUG
|
||||
#endif
|
||||
|
||||
typedef enum
|
||||
{ RTMP_LOGCRIT=0, RTMP_LOGERROR, RTMP_LOGWARNING, RTMP_LOGINFO,
|
||||
RTMP_LOGDEBUG, RTMP_LOGDEBUG2, RTMP_LOGALL
|
||||
} RTMP_LogLevel;
|
||||
|
||||
extern RTMP_LogLevel RTMP_debuglevel;
|
||||
|
||||
typedef void (RTMP_LogCallback)(int level, const char *fmt, va_list);
|
||||
void RTMP_LogSetCallback(RTMP_LogCallback *cb);
|
||||
void RTMP_LogSetOutput(FILE *file);
|
||||
#ifdef __GNUC__
|
||||
void RTMP_LogPrintf(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
|
||||
void RTMP_LogStatus(const char *format, ...) __attribute__ ((__format__ (__printf__, 1, 2)));
|
||||
void RTMP_Log(int level, const char *format, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
|
||||
#else
|
||||
void RTMP_LogPrintf(const char *format, ...);
|
||||
void RTMP_LogStatus(const char *format, ...);
|
||||
void RTMP_Log(int level, const char *format, ...);
|
||||
#endif
|
||||
void RTMP_LogHex(int level, const uint8_t *data, unsigned long len);
|
||||
void RTMP_LogHexString(int level, const uint8_t *data, unsigned long len);
|
||||
void RTMP_LogSetLevel(RTMP_LogLevel lvl);
|
||||
RTMP_LogLevel RTMP_LogGetLevel(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -1,382 +0,0 @@
|
||||
#ifndef __RTMP_H__
|
||||
#define __RTMP_H__
|
||||
/*
|
||||
* Copyright (C) 2005-2008 Team XBMC
|
||||
* http://www.xbmc.org
|
||||
* Copyright (C) 2008-2009 Andrej Stepanchuk
|
||||
* Copyright (C) 2009-2010 Howard Chu
|
||||
*
|
||||
* This file is part of librtmp.
|
||||
*
|
||||
* librtmp is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Lesser General Public License as
|
||||
* published by the Free Software Foundation; either version 2.1,
|
||||
* or (at your option) any later version.
|
||||
*
|
||||
* librtmp is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public License
|
||||
* along with librtmp see the file COPYING. If not, write to
|
||||
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
|
||||
* Boston, MA 02110-1301, USA.
|
||||
* http://www.gnu.org/copyleft/lgpl.html
|
||||
*/
|
||||
|
||||
#if !defined(NO_CRYPTO) && !defined(CRYPTO)
|
||||
#define CRYPTO
|
||||
#endif
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#include "amf.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#define RTMP_LIB_VERSION 0x020300 /* 2.3 */
|
||||
|
||||
#define RTMP_FEATURE_HTTP 0x01
|
||||
#define RTMP_FEATURE_ENC 0x02
|
||||
#define RTMP_FEATURE_SSL 0x04
|
||||
#define RTMP_FEATURE_MFP 0x08 /* not yet supported */
|
||||
#define RTMP_FEATURE_WRITE 0x10 /* publish, not play */
|
||||
#define RTMP_FEATURE_HTTP2 0x20 /* server-side rtmpt */
|
||||
|
||||
#define RTMP_PROTOCOL_UNDEFINED -1
|
||||
#define RTMP_PROTOCOL_RTMP 0
|
||||
#define RTMP_PROTOCOL_RTMPE RTMP_FEATURE_ENC
|
||||
#define RTMP_PROTOCOL_RTMPT RTMP_FEATURE_HTTP
|
||||
#define RTMP_PROTOCOL_RTMPS RTMP_FEATURE_SSL
|
||||
#define RTMP_PROTOCOL_RTMPTE (RTMP_FEATURE_HTTP|RTMP_FEATURE_ENC)
|
||||
#define RTMP_PROTOCOL_RTMPTS (RTMP_FEATURE_HTTP|RTMP_FEATURE_SSL)
|
||||
#define RTMP_PROTOCOL_RTMFP RTMP_FEATURE_MFP
|
||||
|
||||
#define RTMP_DEFAULT_CHUNKSIZE 128
|
||||
|
||||
/* needs to fit largest number of bytes recv() may return */
|
||||
#define RTMP_BUFFER_CACHE_SIZE (16*1024)
|
||||
|
||||
#define RTMP_CHANNELS 65600
|
||||
|
||||
extern const char RTMPProtocolStringsLower[][7];
|
||||
extern const AVal RTMP_DefaultFlashVer;
|
||||
extern int RTMP_ctrlC;
|
||||
|
||||
uint32_t RTMP_GetTime(void);
|
||||
|
||||
/* RTMP_PACKET_TYPE_... 0x00 */
|
||||
#define RTMP_PACKET_TYPE_CHUNK_SIZE 0x01
|
||||
/* RTMP_PACKET_TYPE_... 0x02 */
|
||||
#define RTMP_PACKET_TYPE_BYTES_READ_REPORT 0x03
|
||||
#define RTMP_PACKET_TYPE_CONTROL 0x04
|
||||
#define RTMP_PACKET_TYPE_SERVER_BW 0x05
|
||||
#define RTMP_PACKET_TYPE_CLIENT_BW 0x06
|
||||
/* RTMP_PACKET_TYPE_... 0x07 */
|
||||
#define RTMP_PACKET_TYPE_AUDIO 0x08
|
||||
#define RTMP_PACKET_TYPE_VIDEO 0x09
|
||||
/* RTMP_PACKET_TYPE_... 0x0A */
|
||||
/* RTMP_PACKET_TYPE_... 0x0B */
|
||||
/* RTMP_PACKET_TYPE_... 0x0C */
|
||||
/* RTMP_PACKET_TYPE_... 0x0D */
|
||||
/* RTMP_PACKET_TYPE_... 0x0E */
|
||||
#define RTMP_PACKET_TYPE_FLEX_STREAM_SEND 0x0F
|
||||
#define RTMP_PACKET_TYPE_FLEX_SHARED_OBJECT 0x10
|
||||
#define RTMP_PACKET_TYPE_FLEX_MESSAGE 0x11
|
||||
#define RTMP_PACKET_TYPE_INFO 0x12
|
||||
#define RTMP_PACKET_TYPE_SHARED_OBJECT 0x13
|
||||
#define RTMP_PACKET_TYPE_INVOKE 0x14
|
||||
/* RTMP_PACKET_TYPE_... 0x15 */
|
||||
#define RTMP_PACKET_TYPE_FLASH_VIDEO 0x16
|
||||
|
||||
#define RTMP_MAX_HEADER_SIZE 18
|
||||
|
||||
#define RTMP_PACKET_SIZE_LARGE 0
|
||||
#define RTMP_PACKET_SIZE_MEDIUM 1
|
||||
#define RTMP_PACKET_SIZE_SMALL 2
|
||||
#define RTMP_PACKET_SIZE_MINIMUM 3
|
||||
|
||||
typedef struct RTMPChunk
|
||||
{
|
||||
int c_headerSize;
|
||||
int c_chunkSize;
|
||||
char *c_chunk;
|
||||
char c_header[RTMP_MAX_HEADER_SIZE];
|
||||
} RTMPChunk;
|
||||
|
||||
typedef struct RTMPPacket
|
||||
{
|
||||
uint8_t m_headerType;
|
||||
uint8_t m_packetType;
|
||||
uint8_t m_hasAbsTimestamp; /* timestamp absolute or relative? */
|
||||
int m_nChannel;
|
||||
uint32_t m_nTimeStamp; /* timestamp */
|
||||
int32_t m_nInfoField2; /* last 4 bytes in a long header */
|
||||
uint32_t m_nBodySize;
|
||||
uint32_t m_nBytesRead;
|
||||
RTMPChunk *m_chunk;
|
||||
char *m_body;
|
||||
} RTMPPacket;
|
||||
|
||||
typedef struct RTMPSockBuf
|
||||
{
|
||||
int sb_socket;
|
||||
int sb_size; /* number of unprocessed bytes in buffer */
|
||||
char *sb_start; /* pointer into sb_pBuffer of next byte to process */
|
||||
char sb_buf[RTMP_BUFFER_CACHE_SIZE]; /* data read from socket */
|
||||
int sb_timedout;
|
||||
void *sb_ssl;
|
||||
} RTMPSockBuf;
|
||||
|
||||
void RTMPPacket_Reset(RTMPPacket *p);
|
||||
void RTMPPacket_Dump(RTMPPacket *p);
|
||||
int RTMPPacket_Alloc(RTMPPacket *p, int nSize);
|
||||
void RTMPPacket_Free(RTMPPacket *p);
|
||||
|
||||
#define RTMPPacket_IsReady(a) ((a)->m_nBytesRead == (a)->m_nBodySize)
|
||||
|
||||
typedef struct RTMP_LNK
|
||||
{
|
||||
AVal hostname;
|
||||
AVal sockshost;
|
||||
|
||||
AVal playpath0; /* parsed from URL */
|
||||
AVal playpath; /* passed in explicitly */
|
||||
AVal tcUrl;
|
||||
AVal swfUrl;
|
||||
AVal pageUrl;
|
||||
AVal app;
|
||||
AVal auth;
|
||||
AVal flashVer;
|
||||
AVal subscribepath;
|
||||
AVal usherToken;
|
||||
AVal token;
|
||||
AVal pubUser;
|
||||
AVal pubPasswd;
|
||||
AMFObject extras;
|
||||
int edepth;
|
||||
|
||||
int seekTime;
|
||||
int stopTime;
|
||||
|
||||
#define RTMP_LF_AUTH 0x0001 /* using auth param */
|
||||
#define RTMP_LF_LIVE 0x0002 /* stream is live */
|
||||
#define RTMP_LF_SWFV 0x0004 /* do SWF verification */
|
||||
#define RTMP_LF_PLST 0x0008 /* send playlist before play */
|
||||
#define RTMP_LF_BUFX 0x0010 /* toggle stream on BufferEmpty msg */
|
||||
#define RTMP_LF_FTCU 0x0020 /* free tcUrl on close */
|
||||
int lFlags;
|
||||
|
||||
int swfAge;
|
||||
|
||||
int protocol;
|
||||
int timeout; /* connection timeout in seconds */
|
||||
|
||||
#define RTMP_PUB_NAME 0x0001 /* send login to server */
|
||||
#define RTMP_PUB_RESP 0x0002 /* send salted password hash */
|
||||
#define RTMP_PUB_ALLOC 0x0004 /* allocated data for new tcUrl & app */
|
||||
#define RTMP_PUB_CLEAN 0x0008 /* need to free allocated data for newer tcUrl & app at exit */
|
||||
#define RTMP_PUB_CLATE 0x0010 /* late clean tcUrl & app at exit */
|
||||
int pFlags;
|
||||
|
||||
unsigned short socksport;
|
||||
unsigned short port;
|
||||
|
||||
#ifdef CRYPTO
|
||||
#define RTMP_SWF_HASHLEN 32
|
||||
void *dh; /* for encryption */
|
||||
void *rc4keyIn;
|
||||
void *rc4keyOut;
|
||||
|
||||
uint32_t SWFSize;
|
||||
uint8_t SWFHash[RTMP_SWF_HASHLEN];
|
||||
char SWFVerificationResponse[RTMP_SWF_HASHLEN+10];
|
||||
#endif
|
||||
} RTMP_LNK;
|
||||
|
||||
/* state for read() wrapper */
|
||||
typedef struct RTMP_READ
|
||||
{
|
||||
char *buf;
|
||||
char *bufpos;
|
||||
unsigned int buflen;
|
||||
uint32_t timestamp;
|
||||
uint8_t dataType;
|
||||
uint8_t flags;
|
||||
#define RTMP_READ_HEADER 0x01
|
||||
#define RTMP_READ_RESUME 0x02
|
||||
#define RTMP_READ_NO_IGNORE 0x04
|
||||
#define RTMP_READ_GOTKF 0x08
|
||||
#define RTMP_READ_GOTFLVK 0x10
|
||||
#define RTMP_READ_SEEKING 0x20
|
||||
int8_t status;
|
||||
#define RTMP_READ_COMPLETE -3
|
||||
#define RTMP_READ_ERROR -2
|
||||
#define RTMP_READ_EOF -1
|
||||
#define RTMP_READ_IGNORE 0
|
||||
|
||||
/* if bResume == TRUE */
|
||||
uint8_t initialFrameType;
|
||||
uint32_t nResumeTS;
|
||||
char *metaHeader;
|
||||
char *initialFrame;
|
||||
uint32_t nMetaHeaderSize;
|
||||
uint32_t nInitialFrameSize;
|
||||
uint32_t nIgnoredFrameCounter;
|
||||
uint32_t nIgnoredFlvFrameCounter;
|
||||
} RTMP_READ;
|
||||
|
||||
typedef struct RTMP_METHOD
|
||||
{
|
||||
AVal name;
|
||||
int num;
|
||||
} RTMP_METHOD;
|
||||
|
||||
typedef struct RTMP
|
||||
{
|
||||
int m_inChunkSize;
|
||||
int m_outChunkSize;
|
||||
int m_nBWCheckCounter;
|
||||
int m_nBytesIn;
|
||||
int m_nBytesInSent;
|
||||
int m_nBufferMS;
|
||||
int m_stream_id; /* returned in _result from createStream */
|
||||
int m_mediaChannel;
|
||||
uint32_t m_mediaStamp;
|
||||
uint32_t m_pauseStamp;
|
||||
int m_pausing;
|
||||
int m_nServerBW;
|
||||
int m_nClientBW;
|
||||
uint8_t m_nClientBW2;
|
||||
uint8_t m_bPlaying;
|
||||
uint8_t m_bSendEncoding;
|
||||
uint8_t m_bSendCounter;
|
||||
|
||||
int m_numInvokes;
|
||||
int m_numCalls;
|
||||
RTMP_METHOD *m_methodCalls; /* remote method calls queue */
|
||||
|
||||
int m_channelsAllocatedIn;
|
||||
int m_channelsAllocatedOut;
|
||||
RTMPPacket **m_vecChannelsIn;
|
||||
RTMPPacket **m_vecChannelsOut;
|
||||
int *m_channelTimestamp; /* abs timestamp of last packet */
|
||||
|
||||
double m_fAudioCodecs; /* audioCodecs for the connect packet */
|
||||
double m_fVideoCodecs; /* videoCodecs for the connect packet */
|
||||
double m_fEncoding; /* AMF0 or AMF3 */
|
||||
|
||||
double m_fDuration; /* duration of stream in seconds */
|
||||
|
||||
int m_msgCounter; /* RTMPT stuff */
|
||||
int m_polling;
|
||||
int m_resplen;
|
||||
int m_unackd;
|
||||
AVal m_clientID;
|
||||
|
||||
RTMP_READ m_read;
|
||||
RTMPPacket m_write;
|
||||
RTMPSockBuf m_sb;
|
||||
RTMP_LNK Link;
|
||||
} RTMP;
|
||||
|
||||
int RTMP_ParseURL(const char *url, int *protocol, AVal *host,
|
||||
unsigned int *port, AVal *playpath, AVal *app);
|
||||
|
||||
void RTMP_ParsePlaypath(AVal *in, AVal *out);
|
||||
void RTMP_SetBufferMS(RTMP *r, int size);
|
||||
void RTMP_UpdateBufferMS(RTMP *r);
|
||||
|
||||
int RTMP_SetOpt(RTMP *r, const AVal *opt, AVal *arg);
|
||||
int RTMP_SetupURL(RTMP *r, char *url);
|
||||
void RTMP_SetupStream(RTMP *r, int protocol,
|
||||
AVal *hostname,
|
||||
unsigned int port,
|
||||
AVal *sockshost,
|
||||
AVal *playpath,
|
||||
AVal *tcUrl,
|
||||
AVal *swfUrl,
|
||||
AVal *pageUrl,
|
||||
AVal *app,
|
||||
AVal *auth,
|
||||
AVal *swfSHA256Hash,
|
||||
uint32_t swfSize,
|
||||
AVal *flashVer,
|
||||
AVal *subscribepath,
|
||||
AVal *usherToken,
|
||||
int dStart,
|
||||
int dStop, int bLiveStream, long int timeout);
|
||||
|
||||
int RTMP_Connect(RTMP *r, RTMPPacket *cp);
|
||||
struct sockaddr;
|
||||
int RTMP_Connect0(RTMP *r, struct sockaddr *svc);
|
||||
int RTMP_Connect1(RTMP *r, RTMPPacket *cp);
|
||||
int RTMP_Serve(RTMP *r);
|
||||
int RTMP_TLS_Accept(RTMP *r, void *ctx);
|
||||
|
||||
int RTMP_ReadPacket(RTMP *r, RTMPPacket *packet);
|
||||
int RTMP_SendPacket(RTMP *r, RTMPPacket *packet, int queue);
|
||||
int RTMP_SendChunk(RTMP *r, RTMPChunk *chunk);
|
||||
int RTMP_IsConnected(RTMP *r);
|
||||
int RTMP_Socket(RTMP *r);
|
||||
int RTMP_IsTimedout(RTMP *r);
|
||||
double RTMP_GetDuration(RTMP *r);
|
||||
int RTMP_ToggleStream(RTMP *r);
|
||||
|
||||
int RTMP_ConnectStream(RTMP *r, int seekTime);
|
||||
int RTMP_ReconnectStream(RTMP *r, int seekTime);
|
||||
void RTMP_DeleteStream(RTMP *r);
|
||||
int RTMP_GetNextMediaPacket(RTMP *r, RTMPPacket *packet);
|
||||
int RTMP_ClientPacket(RTMP *r, RTMPPacket *packet);
|
||||
|
||||
void RTMP_Init(RTMP *r);
|
||||
void RTMP_Close(RTMP *r);
|
||||
RTMP *RTMP_Alloc(void);
|
||||
void RTMP_Free(RTMP *r);
|
||||
void RTMP_EnableWrite(RTMP *r);
|
||||
|
||||
void *RTMP_TLS_AllocServerContext(const char* cert, const char* key);
|
||||
void RTMP_TLS_FreeServerContext(void *ctx);
|
||||
|
||||
int RTMP_LibVersion(void);
|
||||
void RTMP_UserInterrupt(void); /* user typed Ctrl-C */
|
||||
|
||||
int RTMP_SendCtrl(RTMP *r, short nType, unsigned int nObject,
|
||||
unsigned int nTime);
|
||||
|
||||
/* caller probably doesn't know current timestamp, should
|
||||
* just use RTMP_Pause instead
|
||||
*/
|
||||
int RTMP_SendPause(RTMP *r, int DoPause, int dTime);
|
||||
int RTMP_Pause(RTMP *r, int DoPause);
|
||||
|
||||
int RTMP_FindFirstMatchingProperty(AMFObject *obj, const AVal *name,
|
||||
AMFObjectProperty * p);
|
||||
|
||||
int RTMPSockBuf_Fill(RTMPSockBuf *sb);
|
||||
int RTMPSockBuf_Send(RTMPSockBuf *sb, const char *buf, int len);
|
||||
int RTMPSockBuf_Close(RTMPSockBuf *sb);
|
||||
|
||||
int RTMP_SendCreateStream(RTMP *r);
|
||||
int RTMP_SendSeek(RTMP *r, int dTime);
|
||||
int RTMP_SendServerBW(RTMP *r);
|
||||
int RTMP_SendClientBW(RTMP *r);
|
||||
void RTMP_DropRequest(RTMP *r, int i, int freeit);
|
||||
int RTMP_Read(RTMP *r, char *buf, int size);
|
||||
int RTMP_Write(RTMP *r, const char *buf, int size);
|
||||
|
||||
/* hashswf.c */
|
||||
int RTMP_HashSWF(const char *url, unsigned int *size, unsigned char *hash,
|
||||
int age);
|
||||
|
||||
#ifdef __cplusplus
|
||||
};
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -38,10 +38,16 @@
|
||||
* allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
|
||||
* must be set with the @ref avoptions API.
|
||||
*
|
||||
* The first thing you will need to do in order to use lswr is to allocate
|
||||
* SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
|
||||
* are using the former, you must set options through the @ref avoptions API.
|
||||
* The latter function provides the same feature, but it allows you to set some
|
||||
* common options in the same statement.
|
||||
*
|
||||
* For example the following code will setup conversion from planar float sample
|
||||
* format to interleaved signed 16-bit integer, downsampling from 48kHz to
|
||||
* 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
|
||||
- * matrix):
+ * matrix). This is using the swr_alloc() function.
|
||||
* @code
|
||||
* SwrContext *swr = swr_alloc();
|
||||
* av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
|
||||
@@ -52,10 +58,24 @@
|
||||
* av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
* @endcode
|
||||
*
|
||||
* The same job can be done using swr_alloc_set_opts() as well:
|
||||
* @code
|
||||
* SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
|
||||
* AV_CH_LAYOUT_STEREO, // out_ch_layout
|
||||
* AV_SAMPLE_FMT_S16, // out_sample_fmt
|
||||
* 44100, // out_sample_rate
|
||||
* AV_CH_LAYOUT_5POINT1, // in_ch_layout
|
||||
* AV_SAMPLE_FMT_FLTP, // in_sample_fmt
|
||||
* 48000, // in_sample_rate
|
||||
* 0, // log_offset
|
||||
* NULL); // log_ctx
|
||||
* @endcode
|
||||
*
|
||||
* Once all values have been set, it must be initialized with swr_init(). If
|
||||
* you need to change the conversion parameters, you can change the parameters
|
||||
* as described above, or by using swr_alloc_set_opts(), then call swr_init()
|
||||
* again.
|
||||
* using @ref AVOptions, as described above in the first example; or by using
|
||||
* swr_alloc_set_opts(), but with the first argument the allocated context.
|
||||
* You must then call swr_init() again.
|
||||
*
|
||||
* The conversion itself is done by repeatedly calling swr_convert().
|
||||
* Note that the samples may get buffered in swr if you provide insufficient
|
||||
@@ -65,6 +85,10 @@
|
||||
* At the end of conversion the resampling buffer can be flushed by calling
|
||||
* swr_convert() with NULL in and 0 in_count.
|
||||
*
|
||||
* The samples used in the conversion process can be managed with the libavutil
|
||||
* @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
|
||||
* function used in the following example.
|
||||
*
|
||||
* The delay between input and output, can at any time be found by using
|
||||
* swr_get_delay().
|
||||
*
|
||||
@@ -89,6 +113,9 @@
|
||||
*
|
||||
* When the conversion is finished, the conversion
|
||||
* context and everything associated with it must be freed with swr_free().
|
||||
* A swr_close() function is also available, but it exists mainly for
|
||||
* compatibility with libavresample, and is not required to be called.
|
||||
*
|
||||
* There will be no memory leak if the data is not completely flushed before
|
||||
* swr_free().
|
||||
*/
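To tie the overview above together, here is a hedged end-to-end sketch of the same 5.1 planar-float 48 kHz to stereo S16 44.1 kHz conversion: allocate with swr_alloc_set_opts(), initialize, convert block by block, flush with a NULL input, then free. run_conversion(), resample_block() and the FILE* sink are illustrative names, not part of the library.

#include <stdio.h>
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mathematics.h>
#include <libavutil/mem.h>

/* Convert one block of 5.1 planar-float input (or flush, when in == NULL)
 * and append the packed stereo S16 result to out_file. */
static int resample_block(struct SwrContext *swr, const uint8_t **in,
                          int in_samples, FILE *out_file)
{
    uint8_t *out[1] = { NULL };      /* packed output: a single data plane */
    int out_linesize, got;
    int max_out = (int)av_rescale_rnd(swr_get_delay(swr, 48000) + in_samples,
                                      44100, 48000, AV_ROUND_UP);

    if (max_out <= 0)
        return 0;
    if (av_samples_alloc(out, &out_linesize, 2, max_out,
                         AV_SAMPLE_FMT_S16, 0) < 0)
        return -1;
    got = swr_convert(swr, out, max_out, in, in_samples); /* in==NULL, 0 flushes */
    if (got > 0)
        fwrite(out[0], 1, (size_t)got * 2 /*ch*/ * 2 /*bytes*/, out_file);
    av_freep(&out[0]);
    return got;
}

int run_conversion(FILE *out_file)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16,  44100,  /* output side */
            AV_CH_LAYOUT_5POINT1, AV_SAMPLE_FMT_FLTP, 48000,  /* input side  */
            0, NULL);
    int ret = -1;

    if (swr && swr_init(swr) >= 0) {
        /* feed the input here, one block per call, e.g.
         *   resample_block(swr, (const uint8_t **)frame->extended_data,
         *                  frame->nb_samples, out_file);
         * then drain whatever is still buffered: */
        resample_block(swr, NULL, 0, out_file);
        ret = 0;
    }
    swr_free(&swr);
    return ret;
}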
@@ -102,10 +129,18 @@
#define SWR_CH_MAX 32 ///< Maximum number of channels
#endif

/**
 * @name Option constants
 * These constants are used for the @ref avoptions interface for lswr.
 * @{
 *
 */

#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate
//TODO use int resample ?
//long term TODO can we enable this dynamically?

/** Dithering algorithms */
enum SwrDitherType {
    SWR_DITHER_NONE = 0,
    SWR_DITHER_RECTANGULAR,
@@ -137,16 +172,32 @@ enum SwrFilterType {
    SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
};

/**
 * @}
 */

/**
 * The libswresample context. Unlike libavcodec and libavformat, this structure
 * is opaque. This means that if you would like to set options, you must use
 * the @ref avoptions API and cannot directly set values to members of the
 * structure.
 */
typedef struct SwrContext SwrContext;

/**
 * Get the AVClass for swrContext. It can be used in combination with
 * Get the AVClass for SwrContext. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 * @return the AVClass of SwrContext
 */
const AVClass *swr_get_class(void);
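As the comment suggests, the class returned here can be examined with av_opt_find() without ever allocating a context. A small sketch follows; probe_option() is an illustrative helper, and "dither_method" below is only an assumed example of an option name to look up.

#include <stdio.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>

/* Look an option up on the "fake object", i.e. without allocating a context. */
static void probe_option(const char *name)
{
    const AVClass *cls = swr_get_class();
    const AVOption *o  = av_opt_find((void *)&cls, name, NULL, 0,
                                     AV_OPT_SEARCH_FAKE_OBJ);

    if (o)
        printf("lswr option '%s': %s\n", o->name, o->help ? o->help : "(no help text)");
    else
        printf("lswr has no option named '%s'\n", name);
}

A call such as probe_option("dither_method") would then report whether that option exists and its help text, without touching an SwrContext.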

/**
 * @name SwrContext constructor functions
 * @{
 */

/**
 * Allocate SwrContext.
 *
@@ -161,6 +212,7 @@ struct SwrContext *swr_alloc(void);
/**
 * Initialize context after user parameters have been set.
 *
 * @param[in,out] s Swr context to initialize
 * @return AVERROR error code in case of failure.
 */
int swr_init(struct SwrContext *s);
@@ -168,6 +220,8 @@ int swr_init(struct SwrContext *s);
/**
 * Check whether an swr context has been initialized or not.
 *
 * @param[in] s Swr context to check
 * @see swr_init()
 * @return positive if it has been initialized, 0 if not initialized
 */
int swr_is_initialized(struct SwrContext *s);
@@ -179,7 +233,7 @@ int swr_is_initialized(struct SwrContext *s);
 * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
 * on the allocated context.
 *
 * @param s Swr context, can be NULL
 * @param s existing Swr context if available, or NULL if not
 * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*)
 * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*).
 * @param out_sample_rate output sample rate (frequency in Hz)
@@ -197,20 +251,47 @@ struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
                                      int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
                                      int log_offset, void *log_ctx);

/**
 * @}
 *
 * @name SwrContext destructor functions
 * @{
 */

/**
 * Free the given SwrContext and set the pointer to NULL.
 *
 * @param[in] s a pointer to a pointer to Swr context
 */
void swr_free(struct SwrContext **s);

/**
 * Convert audio.
 * Closes the context so that swr_is_initialized() returns 0.
 *
 * The context can be brought back to life by running swr_init(),
 * swr_init() can also be used without swr_close().
 * This function is mainly provided for simplifying the usecase
 * where one tries to support libavresample and libswresample.
 *
 * @param[in,out] s Swr context to be closed
 * */
void swr_close(struct SwrContext *s);
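A tiny sketch of the close/re-init round trip described above, for code that treats libswresample like libavresample; reopen() is an illustrative name, and it assumes the options previously set on the context are simply reused by swr_init().

#include <libswresample/swresample.h>

/* Drop back to the "not initialized" state and come back again. */
static int reopen(struct SwrContext *swr)
{
    swr_close(swr);          /* swr_is_initialized(swr) is now 0 */
    return swr_init(swr);    /* >= 0 on success, AVERROR code on failure */
}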

/**
 * @}
 *
 * @name Core conversion functions
 * @{
 */

/** Convert audio.
 *
 * in and in_count can be set to 0 to flush the last few samples out at the
 * end.
 *
 * If more input is provided than output space then the input will be buffered.
 * You can avoid this buffering by providing more output space than input.
 * Convertion will run directly without copying whenever possible.
 * Conversion will run directly without copying whenever possible.
 *
 * @param s allocated Swr context, with parameters set
 * @param out output buffers, only the first one need be set in case of packed audio
@@ -228,28 +309,54 @@ int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
 * timestamps are in 1/(in_sample_rate * out_sample_rate) units.
 *
 * @note There are 2 slightly differently behaving modes.
 * First is when automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
 * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
 * in this case timestamps will be passed through with delays compensated
 * Second is when automatic timestamp compensation is used, (min_compensation < FLT_MAX)
 * in this case the output timestamps will match output sample numbers
 * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
 * in this case the output timestamps will match output sample numbers.
 * See ffmpeg-resampler(1) for the two modes of compensation.
 *
 * @param pts timestamp for the next input sample, INT64_MIN if unknown
 * @param s[in] initialized Swr context
 * @param pts[in] timestamp for the next input sample, INT64_MIN if unknown
 * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are
 * function used internally for timestamp compensation.
 * @return the output timestamp for the next output sample
 */
int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
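One hedged way to feed swr_next_pts() in the 1/(in_sample_rate * out_sample_rate) units it documents: rescale the incoming pts into that timebase, ask for the matching output timestamp, and rescale the answer back. output_pts_for() and its rate/timebase parameters are placeholders, and the int product of the two rates is assumed not to overflow.

#include <stdint.h>
#include <libswresample/swresample.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Map a frame pts through swr_next_pts() and back into the output time base. */
static int64_t output_pts_for(struct SwrContext *swr,
                              int64_t frame_pts, AVRational frame_tb,
                              int in_rate, int out_rate, AVRational out_tb)
{
    AVRational swr_tb = { 1, in_rate * out_rate };  /* assumed to fit in int */
    int64_t pts_in  = (frame_pts == AV_NOPTS_VALUE)
                      ? INT64_MIN                    /* "unknown", per the doc above */
                      : av_rescale_q(frame_pts, frame_tb, swr_tb);
    int64_t pts_out = swr_next_pts(swr, pts_in);

    return av_rescale_q(pts_out, swr_tb, out_tb);
}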

/**
 * Activate resampling compensation.
 * @}
 *
 * @name Low-level option setting functions
 * These functons provide a means to set low-level options that is not possible
 * with the AVOption API.
 * @{
 */

/**
 * Activate resampling compensation ("soft" compensation). This function is
 * internally called when needed in swr_next_pts().
 *
 * @param[in,out] s allocated Swr context. If it is not initialized,
 *                  or SWR_FLAG_RESAMPLE is not set, swr_init() is
 *                  called with the flag set.
 * @param[in] sample_delta delta in PTS per sample
 * @param[in] compensation_distance number of samples to compensate for
 * @return >= 0 on success, AVERROR error codes if:
 * @li @c s is NULL,
 * @li @c compensation_distance is less than 0,
 * @li @c compensation_distance is 0 but sample_delta is not,
 * @li compensation unsupported by resampler, or
 * @li swr_init() fails when called.
 */
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);

/**
 * Set a customized input channel mapping.
 *
 * @param s allocated Swr context, not yet initialized
 * @param channel_map customized input channel mapping (array of channel
 * indexes, -1 for a muted channel)
 * @return AVERROR error code in case of failure.
 * @param[in,out] s allocated Swr context, not yet initialized
 * @param[in] channel_map customized input channel mapping (array of channel
 *                        indexes, -1 for a muted channel)
 * @return >= 0 on success, or AVERROR error code in case of failure.
 */
int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
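An illustrative mapping, following the doc above (an array of channel indexes, -1 muting a channel): swap the first two input channels and mute the third. apply_mapping() is a made-up helper, and the call is assumed to happen after allocation but before swr_init(), as documented.

#include <libswresample/swresample.h>

/* Swap the first two input channels and mute the third one. */
static int apply_mapping(struct SwrContext *swr)
{
    static const int channel_map[] = { 1, 0, -1 };

    return swr_set_channel_mapping(swr, channel_map);  /* before swr_init() */
}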

@@ -260,17 +367,40 @@ int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
 * @param matrix remix coefficients; matrix[i + stride * o] is
 * the weight of input channel i in output channel o
 * @param stride offset between lines of the matrix
 * @return AVERROR error code in case of failure.
 * @return >= 0 on success, or AVERROR error code in case of failure.
 */
int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
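A sketch of a hand-built rematrix using the matrix[i + stride * o] layout documented above: average a stereo input into a single output channel. use_mono_downmix() is an illustrative name, and as with the channel mapping it is assumed to be applied before swr_init().

#include <libswresample/swresample.h>

/* With one output channel and stride == 2, matrix[i + stride * 0] is simply
 * the weight of input channel i in the mono output. */
static int use_mono_downmix(struct SwrContext *swr)
{
    static const double matrix[] = { 0.5, 0.5 };   /* out0 = 0.5*in0 + 0.5*in1 */

    return swr_set_matrix(swr, matrix, 2);
}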

/**
 * @}
 *
 * @name Sample handling functions
 * @{
 */

/**
 * Drops the specified number of output samples.
 *
 * This function, along with swr_inject_silence(), is called by swr_next_pts()
 * if needed for "hard" compensation.
 *
 * @param s allocated Swr context
 * @param count number of samples to be dropped
 *
 * @return >= 0 on success, or a negative AVERROR code on failure
 */
int swr_drop_output(struct SwrContext *s, int count);

/**
 * Injects the specified number of silence samples.
 *
 * This function, along with swr_drop_output(), is called by swr_next_pts()
 * if needed for "hard" compensation.
 *
 * @param s allocated Swr context
 * @param count number of samples to be dropped
 *
 * @return >= 0 on success, or a negative AVERROR code on failure
 */
int swr_inject_silence(struct SwrContext *s, int count);

@@ -286,32 +416,53 @@ int swr_inject_silence(struct SwrContext *s, int count);
 * for upsampling and the input sample rate.
 *
 * @param s swr context
 * @param base timebase in which the returned delay will be
 * if its set to 1 the returned delay is in seconds
 * if its set to 1000 the returned delay is in milli seconds
 * if its set to the input sample rate then the returned delay is in input samples
 * if its set to the output sample rate then the returned delay is in output samples
 * an exact rounding free delay can be found by using LCM(in_sample_rate, out_sample_rate)
 * @returns the delay in 1/base units.
 * @param base timebase in which the returned delay will be:
 * @li if it's set to 1 the returned delay is in seconds
 * @li if it's set to 1000 the returned delay is in milliseconds
 * @li if it's set to the input sample rate then the returned
 *     delay is in input samples
 * @li if it's set to the output sample rate then the returned
 *     delay is in output samples
 * @li if it's the least common multiple of in_sample_rate and
 *     out_sample_rate then an exact rounding-free delay will be
 *     returned
 * @returns the delay in 1 / @c base units.
 */
int64_t swr_get_delay(struct SwrContext *s, int64_t base);
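Two common choices of base from the list above, shown as a small logging helper: base = 1000 reports the buffered delay in milliseconds, and base = the output sample rate reports it in output samples. log_swr_delay() and the out_rate parameter are illustrative names.

#include <inttypes.h>
#include <libswresample/swresample.h>
#include <libavutil/log.h>

/* Report the currently buffered delay in two different units. */
static void log_swr_delay(struct SwrContext *swr, int out_rate)
{
    int64_t delay_ms   = swr_get_delay(swr, 1000);      /* base = 1000 -> ms */
    int64_t delay_outs = swr_get_delay(swr, out_rate);  /* base = out rate -> output samples */

    av_log(NULL, AV_LOG_INFO, "swr delay: %"PRId64" ms (%"PRId64" output samples)\n",
           delay_ms, delay_outs);
}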

/**
 * Return the LIBSWRESAMPLE_VERSION_INT constant.
 * @}
 *
 * @name Configuration accessors
 * @{
 */

/**
 * Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
 *
 * This is useful to check if the build-time libswresample has the same version
 * as the run-time one.
 *
 * @returns the unsigned int-typed version
 */
unsigned swresample_version(void);

/**
 * Return the swr build-time configuration.
 *
 * @returns the build-time @c ./configure flags
 */
const char *swresample_configuration(void);

/**
 * Return the swr license.
 *
 * @returns the license of libswresample, determined at build-time
 */
const char *swresample_license(void);
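A short sketch of the check these accessors are meant for: compare the run-time swresample_version() against the LIBSWRESAMPLE_VERSION_INT the program was built with, then dump the build information. report_lswr_build() is an illustrative name.

#include <stdio.h>
#include <libswresample/swresample.h>

/* Warn if the running library does not match the headers we compiled against. */
static void report_lswr_build(void)
{
    unsigned run_time = swresample_version();

    if (run_time != (unsigned)LIBSWRESAMPLE_VERSION_INT)
        fprintf(stderr, "libswresample mismatch: built %u, running %u\n",
                (unsigned)LIBSWRESAMPLE_VERSION_INT, run_time);
    printf("configuration: %s\n", swresample_configuration());
    printf("license: %s\n", swresample_license());
}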

/**
 * @}
 * @}
 */

@@ -29,7 +29,7 @@
#include "libavutil/avutil.h"

#define LIBSWRESAMPLE_VERSION_MAJOR 0
#define LIBSWRESAMPLE_VERSION_MINOR 18
#define LIBSWRESAMPLE_VERSION_MINOR 19
#define LIBSWRESAMPLE_VERSION_MICRO 100

#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \

@@ -92,7 +92,9 @@ const char *swscale_license(void);
#define SWS_CPU_CAPS_MMX2 0x20000000
#define SWS_CPU_CAPS_3DNOW 0x40000000
#define SWS_CPU_CAPS_ALTIVEC 0x10000000
#if FF_API_ARCH_BFIN
#define SWS_CPU_CAPS_BFIN 0x01000000
#endif
#define SWS_CPU_CAPS_SSE2 0x02000000
#endif

@@ -27,8 +27,8 @@
#include "libavutil/version.h"

#define LIBSWSCALE_VERSION_MAJOR 2
#define LIBSWSCALE_VERSION_MINOR 5
#define LIBSWSCALE_VERSION_MICRO 102
#define LIBSWSCALE_VERSION_MINOR 6
#define LIBSWSCALE_VERSION_MICRO 100

#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
                                              LIBSWSCALE_VERSION_MINOR, \
@@ -55,5 +55,8 @@
#ifndef FF_API_SWS_FORMAT_NAME
#define FF_API_SWS_FORMAT_NAME (LIBSWSCALE_VERSION_MAJOR < 3)
#endif
#ifndef FF_API_ARCH_BFIN
#define FF_API_ARCH_BFIN (LIBSWSCALE_VERSION_MAJOR < 3)
#endif

#endif /* SWSCALE_VERSION_H */