Add H.265 video support

This commit is contained in:
Cameron Gutman 2016-03-02 06:25:19 -05:00
parent fbd58c60ea
commit 51e5b89018
8 changed files with 122 additions and 54 deletions

View File

@@ -15,6 +15,7 @@ STREAM_CONFIGURATION StreamConfig;
CONNECTION_LISTENER_CALLBACKS ListenerCallbacks;
DECODER_RENDERER_CALLBACKS VideoCallbacks;
AUDIO_RENDERER_CALLBACKS AudioCallbacks;
int NegotiatedVideoFormat;
// Connection stages
static const char* stageNames[STAGE_MAX] = {
@@ -177,6 +178,7 @@ int LiStartConnection(const char* host, PSTREAM_CONFIGURATION streamConfig, PCON
void* renderContext, int drFlags, int _serverMajorVersion) {
int err;
NegotiatedVideoFormat = 0;
ServerMajorVersion = _serverMajorVersion;
memcpy(&StreamConfig, streamConfig, sizeof(StreamConfig));

View File

@@ -1,6 +1,6 @@
#include "Limelight-internal.h"
static void fakeDrSetup(int width, int height, int redrawRate, void* context, int drFlags) {}
// No-op decoder renderer callbacks: setup and cleanup do nothing, and
// fakeDrSubmitDecodeUnit accepts every decode unit by returning DR_OK.
// NOTE(review): presumably installed as placeholder DECODER_RENDERER_CALLBACKS
// when the client supplies no real video renderer -- confirm against callers.
static void fakeDrSetup(int videoFormat, int width, int height, int redrawRate, void* context, int drFlags) {}
static void fakeDrCleanup(void) {}
static int fakeDrSubmitDecodeUnit(PDECODE_UNIT decodeUnit) { return DR_OK; }

View File

@@ -14,6 +14,7 @@ extern STREAM_CONFIGURATION StreamConfig;
extern CONNECTION_LISTENER_CALLBACKS ListenerCallbacks;
extern DECODER_RENDERER_CALLBACKS VideoCallbacks;
extern AUDIO_RENDERER_CALLBACKS AudioCallbacks;
extern int NegotiatedVideoFormat;
int isBeforeSignedInt(int numA, int numB, int ambiguousCase);

View File

@@ -33,6 +33,10 @@ typedef struct _STREAM_CONFIGURATION {
// See AUDIO_CONFIGURATION_XXX constants below.
int audioConfiguration;
// Specifies that the client can accept an H.265 video stream
// if the server is able to provide one.
int supportsHevc;
// AES encryption data for the remote input stream. This must be
// the same as what was passed as rikey and rikeyid
// in /launch and /resume requests.
@@ -54,7 +58,7 @@ typedef struct _LENTRY {
int length;
} LENTRY, *PLENTRY;
// A decode unit describes a buffer chain of H264 data from multiple packets
// A decode unit describes a buffer chain of video data from multiple packets
typedef struct _DECODE_UNIT {
// Length of the entire buffer chain in bytes
int fullLength;
@@ -69,6 +73,14 @@ typedef struct _DECODE_UNIT {
// Specifies that the audio stream should be in 5.1 surround sound if the PC is able
#define AUDIO_CONFIGURATION_51_SURROUND 1
// Passed to DecoderRendererSetup to indicate that the following video stream will be
// in H.264 format
#define VIDEO_FORMAT_H264 1
// Passed to DecoderRendererSetup to indicate that the following video stream will be
// in H.265 format
#define VIDEO_FORMAT_H265 2
// If set in the renderer capabilities field, this flag will cause audio/video data to
// be submitted directly from the receive thread. This should only be specified if the
// renderer is non-blocking. This flag is valid on both audio and video renderers.
@@ -80,17 +92,17 @@ typedef struct _DECODE_UNIT {
#define CAPABILITY_REFERENCE_FRAME_INVALIDATION 0x2
// If set in the video renderer capabilities field, this macro specifies that the renderer
// supports H264 slicing to increase decoding performance. The parameter specifies the desired
// supports slicing to increase decoding performance. The parameter specifies the desired
// number of slices per frame. This capability is only valid on video renderers.
#define CAPABILITY_SLICES_PER_FRAME(x) (((unsigned char)(x)) << 24)
// This callback is invoked to provide details about the video stream and allow configuration of the decoder
typedef void(*DecoderRendererSetup)(int width, int height, int redrawRate, void* context, int drFlags);
typedef void(*DecoderRendererSetup)(int videoFormat, int width, int height, int redrawRate, void* context, int drFlags);
// This callback performs the teardown of the video decoder
typedef void(*DecoderRendererCleanup)(void);
// This callback provides Annex B formatted H264 elementary stream data to the
// This callback provides Annex B formatted elementary stream data to the
// decoder. If the decoder is unable to process the submitted data for some reason,
// it must return DR_NEED_IDR to generate a keyframe.
#define DR_OK 0

View File

@@ -339,6 +339,19 @@ int performRtspHandshake(void) {
return response.message.response.statusCode;
}
// The RTSP DESCRIBE reply will contain a collection of SDP media attributes that
// describe the various supported video stream formats and include the SPS, PPS,
// and VPS (if applicable). We will use this information to determine whether the
// server can support HEVC. For some reason, they still set the MIME type of the HEVC
// format to H264, so we can't just look for the HEVC MIME type. What we'll do instead is
// look for the base 64 encoded VPS NALU prefix that is unique to the HEVC bitstream.
if (StreamConfig.supportsHevc && strstr(response.payload, "sprop-parameter-sets=AAAAAU")) {
NegotiatedVideoFormat = VIDEO_FORMAT_H265;
}
else {
NegotiatedVideoFormat = VIDEO_FORMAT_H264;
}
freeMessage(&response);
}

View File

@@ -138,22 +138,10 @@ static int addGen3Options(PSDP_OPTION* head, char* addrStr) {
// Appends the Gen 4 server SDP attributes to the option list: the RTSP
// session address (port 48010), the rate control mode, and the requested
// encoder slices per frame. Returns nonzero if any addAttributeString
// call reported failure (error bits are OR'd together).
static int addGen4Options(PSDP_OPTION* head, char* addrStr) {
char payloadStr[92];
int err = 0;
unsigned char slicesPerFrame;
sprintf(payloadStr, "rtsp://%s:48010", addrStr);
err |= addAttributeString(head, "x-nv-general.serverAddress", payloadStr);
err |= addAttributeString(head, "x-nv-video[0].rateControlMode", "4");
// Use slicing for increased performance on some decoders
// (slice count lives in bits 24-31 of the capabilities word,
// as packed by CAPABILITY_SLICES_PER_FRAME)
slicesPerFrame = (unsigned char)(VideoCallbacks.capabilities >> 24);
if (slicesPerFrame == 0) {
// If not using slicing, we request 1 slice per frame
slicesPerFrame = 1;
}
sprintf(payloadStr, "%d", slicesPerFrame);
err |= addAttributeString(head, "x-nv-video[0].videoEncoderSlicesPerFrame", payloadStr);
return err;
}
@@ -242,6 +230,29 @@ static PSDP_OPTION getAttributesList(char*urlSafeAddr) {
}
if (ServerMajorVersion >= 4) {
if (NegotiatedVideoFormat == VIDEO_FORMAT_H265) {
err |= addAttributeString(&optionHead, "x-nv-clientSupportHevc", "1");
err |= addAttributeString(&optionHead, "x-nv-vqos[0].bitStreamFormat", "1");
// Disable slicing on HEVC
err |= addAttributeString(&optionHead, "x-nv-video[0].videoEncoderSlicesPerFrame", "1");
}
else {
unsigned char slicesPerFrame;
err |= addAttributeString(&optionHead, "x-nv-clientSupportHevc", "0");
err |= addAttributeString(&optionHead, "x-nv-vqos[0].bitStreamFormat", "0");
// Use slicing for increased performance on some decoders
slicesPerFrame = (unsigned char)(VideoCallbacks.capabilities >> 24);
if (slicesPerFrame == 0) {
// If not using slicing, we request 1 slice per frame
slicesPerFrame = 1;
}
sprintf(payloadStr, "%d", slicesPerFrame);
err |= addAttributeString(&optionHead, "x-nv-video[0].videoEncoderSlicesPerFrame", payloadStr);
}
if (StreamConfig.audioConfiguration == AUDIO_CONFIGURATION_51_SURROUND) {
audioChannelCount = CHANNEL_COUNT_51_SURROUND;
audioChannelMask = CHANNEL_MASK_51_SURROUND;

View File

@@ -46,8 +46,8 @@ void initializeVideoDepacketizer(int pktSize) {
strictIdrFrameWait = !(VideoCallbacks.capabilities & CAPABILITY_REFERENCE_FRAME_INVALIDATION);
}
// Free malloced memory in AvcFrameState*/
static void cleanupAvcFrameState(void) {
// Free the NAL chain
static void cleanupFrameState(void) {
PLENTRY lastEntry;
while (nalChainHead != NULL) {
@@ -59,8 +59,8 @@ static void cleanupAvcFrameState(void) {
nalChainDataLength = 0;
}
// Cleanup AVC frame state and set that we're waiting for an IDR Frame*/
static void dropAvcFrameState(void) {
// Cleanup frame state and set that we're waiting for an IDR Frame
static void dropFrameState(void) {
// We'll need an IDR frame now if we're in strict mode
if (strictIdrFrameWait) {
waitingForIdrFrame = 1;
@@ -81,7 +81,7 @@ static void dropAvcFrameState(void) {
requestIdrOnDemand();
}
cleanupAvcFrameState();
cleanupFrameState();
}
// Cleanup the list of decode units
@@ -109,7 +109,7 @@ void destroyVideoDepacketizer(void) {
freeDecodeUnitList(LbqDestroyLinkedBlockingQueue(&decodeUnitQueue));
}
cleanupAvcFrameState();
cleanupFrameState();
}
// Returns 1 if candidate is a frame start and 0 otherwise
@@ -117,8 +117,8 @@ static int isSeqFrameStart(PBUFFER_DESC candidate) {
return (candidate->length == 4 && candidate->data[candidate->offset + candidate->length - 1] == 1);
}
// Returns 1 if candidate an AVC start and 0 otherwise
static int isSeqAvcStart(PBUFFER_DESC candidate) {
// Returns 1 if candidate is an Annex B start and 0 otherwise.
// Only the trailing 0x01 byte is checked here; presumably getSpecialSeq has
// already matched the leading zero bytes of the start code -- confirm against
// getSpecialSeq before relying on this alone.
static int isSeqAnnexBStart(PBUFFER_DESC candidate) {
return (candidate->data[candidate->offset + candidate->length - 1] == 1);
}
@@ -188,8 +188,39 @@ void freeQueuedDecodeUnit(PQUEUED_DECODE_UNIT qdu) {
free(qdu);
}
// Returns 1 if the special sequence describes an I-frame
static int isSeqReferenceFrameStart(PBUFFER_DESC specialSeq) {
    // The NAL unit header byte sits immediately after the start sequence
    unsigned char nalHeader = (unsigned char)specialSeq->data[specialSeq->offset + specialSeq->length];

    if (nalHeader == 0x65) {
        // H.264 IDR slice NALU
        return 1;
    }

    // H.265 reference frame NALU header bytes: the even values
    // 0x20, 0x22, 0x24, 0x26, 0x28, 0x2A
    if (nalHeader >= 0x20 && nalHeader <= 0x2A && (nalHeader & 1) == 0) {
        return 1;
    }

    return 0;
}
// Returns 1 if this buffer describes an IDR frame
static int isIdrFrameStart(PBUFFER_DESC buffer) {
    BUFFER_DESC specialSeq;
    unsigned char nalHeader;

    // The buffer must lead with a frame start sequence to qualify
    if (!getSpecialSeq(buffer, &specialSeq) || !isSeqFrameStart(&specialSeq)) {
        return 0;
    }

    nalHeader = (unsigned char)specialSeq.data[specialSeq.offset + specialSeq.length];

    // IDR frames are prefixed by an H.264 SPS (0x67) or an H.265 VPS (0x40)
    return nalHeader == 0x67 || nalHeader == 0x40;
}
// Reassemble the frame with the given frame number
static void reassembleAvcFrame(int frameNumber) {
static void reassembleFrame(int frameNumber) {
if (nalChainHead != NULL) {
PQUEUED_DECODE_UNIT qdu = (PQUEUED_DECODE_UNIT)malloc(sizeof(*qdu));
if (qdu != NULL) {
@@ -206,7 +237,7 @@ static void reassembleAvcFrame(int frameNumber) {
// Clear frame state and wait for an IDR
nalChainHead = qdu->decodeUnit.bufferList;
nalChainDataLength = qdu->decodeUnit.fullLength;
dropAvcFrameState();
dropFrameState();
// Free the DU
free(qdu);
@@ -268,25 +299,25 @@ static void queueFragment(char*data, int offset, int length) {
// Process an RTP Payload
static void processRtpPayloadSlow(PNV_VIDEO_PACKET videoPacket, PBUFFER_DESC currentPos) {
BUFFER_DESC specialSeq;
int decodingAvc = 0;
int decodingVideo = 0;
while (currentPos->length != 0) {
int start = currentPos->offset;
if (getSpecialSeq(currentPos, &specialSeq)) {
if (isSeqAvcStart(&specialSeq)) {
// Now we're decoding AVC
decodingAvc = 1;
if (isSeqAnnexBStart(&specialSeq)) {
// Now we're decoding video
decodingVideo = 1;
if (isSeqFrameStart(&specialSeq)) {
// Now we're working on a frame
decodingFrame = 1;
// Reassemble any pending frame
reassembleAvcFrame(videoPacket->frameIndex);
reassembleFrame(videoPacket->frameIndex);
if (specialSeq.data[specialSeq.offset + specialSeq.length] == 0x65) {
// This is the NALU code for I-frame data
if (isSeqReferenceFrameStart(&specialSeq)) {
// No longer waiting for an IDR frame
waitingForIdrFrame = 0;
}
}
@@ -296,13 +327,13 @@ static void processRtpPayloadSlow(PNV_VIDEO_PACKET videoPacket, PBUFFER_DESC cur
currentPos->offset += specialSeq.length;
}
else {
// Check if this is padding after a full AVC frame
if (decodingAvc && isSeqPadding(currentPos)) {
reassembleAvcFrame(videoPacket->frameIndex);
// Check if this is padding after a full frame
if (decodingVideo && isSeqPadding(currentPos)) {
reassembleFrame(videoPacket->frameIndex);
}
// Not decoding AVC
decodingAvc = 0;
// Not decoding video
decodingVideo = 0;
// Just skip this byte
currentPos->length--;
@@ -314,7 +345,7 @@ static void processRtpPayloadSlow(PNV_VIDEO_PACKET videoPacket, PBUFFER_DESC cur
while (currentPos->length != 0) {
// Check if this should end the current NAL
if (getSpecialSeq(currentPos, &specialSeq)) {
if (decodingAvc || !isSeqPadding(&specialSeq)) {
if (decodingVideo || !isSeqPadding(&specialSeq)) {
break;
}
}
@@ -324,7 +355,7 @@ static void processRtpPayloadSlow(PNV_VIDEO_PACKET videoPacket, PBUFFER_DESC cur
currentPos->length--;
}
if (decodingAvc) {
if (decodingVideo) {
queueFragment(currentPos->data, start, currentPos->offset - start);
}
}
@@ -337,7 +368,7 @@ void requestDecoderRefresh(void) {
waitingForIdrFrame = 1;
// Flush the decode unit queue and pending state
dropAvcFrameState();
dropFrameState();
if ((VideoCallbacks.capabilities & CAPABILITY_DIRECT_SUBMIT) == 0) {
freeDecodeUnitList(LbqFlushQueueItems(&decodeUnitQueue));
}
@@ -363,7 +394,7 @@ static void processRtpPayloadFast(BUFFER_DESC location) {
// Process an RTP Payload
void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
BUFFER_DESC currentPos, specialSeq;
BUFFER_DESC currentPos;
int frameIndex;
char flags;
int firstPacket;
@@ -403,7 +434,7 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
// Unexpected start of next frame before terminating the last
waitingForNextSuccessfulFrame = 1;
dropAvcFrameState();
dropFrameState();
}
// Look for a non-frame start before a frame start
else if (!firstPacket && !decodingFrame) {
@@ -417,7 +448,7 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
waitingForNextSuccessfulFrame = 1;
dropAvcFrameState();
dropFrameState();
decodingFrame = 0;
return;
}
@@ -436,7 +467,7 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
// Wait until next complete frame
waitingForNextSuccessfulFrame = 1;
dropAvcFrameState();
dropFrameState();
}
else if (nextFrameNumber != frameIndex) {
// Duplicate packet or FEC dup
@@ -458,7 +489,7 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
waitingForNextSuccessfulFrame = 1;
dropAvcFrameState();
dropFrameState();
decodingFrame = 0;
return;
@@ -478,10 +509,7 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
currentPos.length -= 8;
}
if (firstPacket &&
getSpecialSeq(&currentPos, &specialSeq) &&
isSeqFrameStart(&specialSeq) &&
specialSeq.data[specialSeq.offset + specialSeq.length] == 0x67)
if (firstPacket && isIdrFrameStart(&currentPos))
{
// SPS and PPS prefix is padded between NALs, so we must decode it with the slow path
processRtpPayloadSlow(videoPacket, &currentPos);
@@ -508,11 +536,11 @@ void processRtpPayload(PNV_VIDEO_PACKET videoPacket, int length) {
if (waitingForIdrFrame) {
Limelog("Waiting for IDR frame\n");
dropAvcFrameState();
dropFrameState();
return;
}
reassembleAvcFrame(frameIndex);
reassembleFrame(frameIndex);
startFrameNumber = nextFrameNumber;
}

View File

@@ -197,7 +197,8 @@ int startVideoStream(void* rendererContext, int drFlags) {
// This must be called before the decoder thread starts submitting
// decode units
VideoCallbacks.setup(StreamConfig.width,
LC_ASSERT(NegotiatedVideoFormat != 0);
VideoCallbacks.setup(NegotiatedVideoFormat, StreamConfig.width,
StreamConfig.height, StreamConfig.fps, rendererContext, drFlags);
rtpSocket = bindUdpSocket(RemoteAddr.ss_family, RTP_RECV_BUFFER);