// Mirror of https://github.com/moonlight-stream/moonlight-ios.git (synced 2025-07-01 23:35:59 +00:00)
//
// VideoDecoderRenderer.m
// Moonlight
//
// Created by Cameron Gutman on 10/18/14.
// Copyright (c) 2014 Moonlight Stream. All rights reserved.
//

#import "VideoDecoderRenderer.h"
#import "StreamView.h"

#include <libavformat/avio.h>
#include <libavutil/mem.h>

// Private libavformat API for writing the AV1 Codec Configuration Box
extern int ff_isom_write_av1c(AVIOContext *pb, const uint8_t *buf, int size,
                              int write_seq_header);

@implementation VideoDecoderRenderer {
    StreamView* _view;
    id<ConnectionCallbacks> _callbacks;
    float _streamAspectRatio;
    
    AVSampleBufferDisplayLayer* displayLayer;
    int videoFormat, videoWidth, videoHeight;
    int frameRate;
    
    NSMutableArray *parameterSetBuffers;
    NSData *masteringDisplayColorVolume;
    NSData *contentLightLevelInfo;
    CMVideoFormatDescriptionRef formatDesc;
    
    CADisplayLink* _displayLink;
    BOOL framePacing;
}

- (void)reinitializeDisplayLayer
{
    CALayer *oldLayer = displayLayer;
    
    displayLayer = [[AVSampleBufferDisplayLayer alloc] init];
    displayLayer.backgroundColor = [UIColor blackColor].CGColor;
    
    // Ensure the AVSampleBufferDisplayLayer is sized to preserve the aspect ratio
    // of the video stream. We used to use AVLayerVideoGravityResizeAspect, but that
    // respects the PAR encoded in the SPS which causes our computed video-relative
    // touch location to be wrong in StreamView if the aspect ratio of the host
    // desktop doesn't match the aspect ratio of the stream.
    CGSize videoSize;
    if (_view.bounds.size.width > _view.bounds.size.height * _streamAspectRatio) {
        videoSize = CGSizeMake(_view.bounds.size.height * _streamAspectRatio, _view.bounds.size.height);
    } else {
        videoSize = CGSizeMake(_view.bounds.size.width, _view.bounds.size.width / _streamAspectRatio);
    }
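    // Worked example with assumed numbers: a 16:9 stream (aspect ~1.778) in a
    // 1024x768 view fails the first test (768 * 1.778 > 1024), so the layer
    // becomes 1024 x (1024 / 1.778) ~= 1024x576, letterboxed vertically.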
    displayLayer.position = CGPointMake(CGRectGetMidX(_view.bounds), CGRectGetMidY(_view.bounds));
    displayLayer.bounds = CGRectMake(0, 0, videoSize.width, videoSize.height);
    displayLayer.videoGravity = AVLayerVideoGravityResize;
    
    // Hide the layer until we get an IDR frame. This ensures we
    // can see the loading progress label as the stream is starting.
    displayLayer.hidden = YES;
    
    if (oldLayer != nil) {
        // Switch out the old display layer with the new one
        [_view.layer replaceSublayer:oldLayer with:displayLayer];
    }
    else {
        [_view.layer addSublayer:displayLayer];
    }
    
    if (formatDesc != nil) {
        CFRelease(formatDesc);
        formatDesc = nil;
    }
}

- (id)initWithView:(StreamView*)view callbacks:(id<ConnectionCallbacks>)callbacks streamAspectRatio:(float)aspectRatio useFramePacing:(BOOL)useFramePacing
{
    self = [super init];
    
    _view = view;
    _callbacks = callbacks;
    _streamAspectRatio = aspectRatio;
    framePacing = useFramePacing;
    
    parameterSetBuffers = [[NSMutableArray alloc] init];
    
    [self reinitializeDisplayLayer];
    
    return self;
}

- (void)setupWithVideoFormat:(int)videoFormat width:(int)videoWidth height:(int)videoHeight frameRate:(int)frameRate
{
    self->videoFormat = videoFormat;
    self->videoWidth = videoWidth;
    self->videoHeight = videoHeight;
    self->frameRate = frameRate;
}

- (void)start
{
    _displayLink = [CADisplayLink displayLinkWithTarget:self selector:@selector(displayLinkCallback:)];
    if (@available(iOS 15.0, tvOS 15.0, *)) {
        _displayLink.preferredFrameRateRange = CAFrameRateRangeMake(self->frameRate, self->frameRate, self->frameRate);
    }
    else {
        _displayLink.preferredFramesPerSecond = self->frameRate;
    }
    [_displayLink addToRunLoop:[NSRunLoop mainRunLoop] forMode:NSDefaultRunLoopMode];
}

// TODO: Refactor this
int DrSubmitDecodeUnit(PDECODE_UNIT decodeUnit);
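// DrSubmitDecodeUnit is the moonlight-common-c decoder renderer callback
// (defined in Connection.m); each decode unit polled off the queue below is
// routed through it into submitDecodeBuffer: in this file.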

- (void)displayLinkCallback:(CADisplayLink *)sender
{
    VIDEO_FRAME_HANDLE handle;
    PDECODE_UNIT du;
    
    while (LiPollNextVideoFrame(&handle, &du)) {
        LiCompleteVideoFrame(handle, DrSubmitDecodeUnit(du));
        
        if (framePacing) {
            // Calculate the actual display refresh rate
            double displayRefreshRate = 1 / (_displayLink.targetTimestamp - _displayLink.timestamp);
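            // e.g. with assumed numbers: a 120 Hz panel throttled to 80 Hz gives
            // targetTimestamp - timestamp == 12.5 ms, so displayRefreshRate == 80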
            
            // Only pace frames if the display refresh rate is >= 90% of our stream frame rate.
            // Battery saver, accessibility settings, or device thermals can cause the actual
            // refresh rate of the display to drop below the physical maximum.
            if (displayRefreshRate >= frameRate * 0.9f) {
                // Keep one pending frame to smooth out gaps due to
                // network jitter at the cost of 1 frame of latency
                if (LiGetPendingVideoFrames() == 1) {
                    break;
                }
            }
        }
    }
}

- (void)stop
{
    [_displayLink invalidate];
}

#define NALU_START_PREFIX_SIZE 3
#define NAL_LENGTH_PREFIX_SIZE 4
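
// VideoToolbox expects AVCC/HVCC-style samples, where each NALU is preceded
// by a big-endian length prefix instead of an Annex B start code.
// Illustration for an assumed 5-byte NALU payload:
//   Annex B input:          00 00 01 <5 payload bytes>
//   length-prefixed output: 00 00 00 05 <5 payload bytes>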
- (void)updateAnnexBBufferForRange:(CMBlockBufferRef)frameBuffer dataBlock:(CMBlockBufferRef)dataBuffer offset:(int)offset length:(int)nalLength
{
    OSStatus status;
    size_t oldOffset = CMBlockBufferGetDataLength(frameBuffer);
    
    // Append a 4 byte buffer to the frame block for the length prefix
    status = CMBlockBufferAppendMemoryBlock(frameBuffer, NULL,
                                            NAL_LENGTH_PREFIX_SIZE,
                                            kCFAllocatorDefault, NULL, 0,
                                            NAL_LENGTH_PREFIX_SIZE, 0);
    if (status != noErr) {
        Log(LOG_E, @"CMBlockBufferAppendMemoryBlock failed: %d", (int)status);
        return;
    }
    
    // Write the length prefix to the new buffer
    const int dataLength = nalLength - NALU_START_PREFIX_SIZE;
    const uint8_t lengthBytes[] = {(uint8_t)(dataLength >> 24), (uint8_t)(dataLength >> 16),
                                   (uint8_t)(dataLength >> 8), (uint8_t)dataLength};
    status = CMBlockBufferReplaceDataBytes(lengthBytes, frameBuffer,
                                           oldOffset, NAL_LENGTH_PREFIX_SIZE);
    if (status != noErr) {
        Log(LOG_E, @"CMBlockBufferReplaceDataBytes failed: %d", (int)status);
        return;
    }
    
    // Attach the data buffer to the frame buffer by reference
    status = CMBlockBufferAppendBufferReference(frameBuffer, dataBuffer, offset + NALU_START_PREFIX_SIZE, dataLength, 0);
    if (status != noErr) {
        Log(LOG_E, @"CMBlockBufferAppendBufferReference failed: %d", (int)status);
        return;
    }
}
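
// Builds the contents of an ISO-BMFF 'av1C' (AV1CodecConfigurationBox) box
// from the frame's OBUs, using libavformat's private helper declared at the
// top of this file. The box carries the sequence header that VideoToolbox
// reads out of the format description's sample description extensions.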
- (NSData*)getAv1CodecConfigurationBox:(NSData*)frameData {
    AVIOContext* ioctx = NULL;
    int err;
    
    err = avio_open_dyn_buf(&ioctx);
    if (err < 0) {
        Log(LOG_E, @"avio_open_dyn_buf() failed: %d", err);
        return nil;
    }
    
    // The sequence header and any metadata OBUs should always fit in the first buffer
    err = ff_isom_write_av1c(ioctx, (uint8_t*)frameData.bytes, (int)frameData.length, 1);
    if (err < 0) {
        Log(LOG_E, @"ff_isom_write_av1c() failed: %d", err);
        // Fall through to close and free the buffer
    }
    
    // Close the dynbuf and get the underlying buffer back (which we must free)
    uint8_t* av1cBuf = NULL;
    int av1cBufLen = avio_close_dyn_buf(ioctx, &av1cBuf);
    
    Log(LOG_I, @"av1C block is %d bytes", av1cBufLen);
    
    // Only return data if ff_isom_write_av1c() was successful
    NSData* data = nil;
    if (err >= 0 && av1cBufLen > 0) {
        data = [NSData dataWithBytes:av1cBuf length:av1cBufLen];
    }
    
    av_free(av1cBuf);
    return data;
}

// Much of this logic comes from Chrome
- (NSDictionary*)createAV1FormatExtensionsDictionaryForDU:(PDECODE_UNIT)du frameData:(NSData*)frameData {
    NSMutableDictionary* extensions = [[NSMutableDictionary alloc] init];
    
    extensions[(__bridge NSString*)kCMFormatDescriptionExtension_FormatName] = @"av01";
    
    // We use the value for YUV without alpha, same as Chrome
    // https://developer.apple.com/library/archive/qa/qa1183/_index.html
    extensions[(__bridge NSString*)kCMFormatDescriptionExtension_Depth] = @24;
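
    // Map Moonlight's colorspace constants onto CoreMedia primaries/transfer/
    // matrix triples; BT.601 content is tagged with SMPTE-C primaries, the
    // same choice the Chrome logic referenced above makes for this input.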
    switch (du->colorspace) {
        default:
        case COLORSPACE_REC_601:
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_ColorPrimaries] = (__bridge NSString*)kCMFormatDescriptionColorPrimaries_SMPTE_C;
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_TransferFunction] = (__bridge NSString*)kCMFormatDescriptionTransferFunction_ITU_R_709_2;
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_YCbCrMatrix] = (__bridge NSString*)kCMFormatDescriptionYCbCrMatrix_ITU_R_601_4;
            break;
        case COLORSPACE_REC_709:
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_ColorPrimaries] = (__bridge NSString*)kCMFormatDescriptionColorPrimaries_ITU_R_709_2;
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_TransferFunction] = (__bridge NSString*)kCMFormatDescriptionTransferFunction_ITU_R_709_2;
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_YCbCrMatrix] = (__bridge NSString*)kCMFormatDescriptionYCbCrMatrix_ITU_R_709_2;
            break;
        case COLORSPACE_REC_2020:
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_ColorPrimaries] = (__bridge NSString*)kCMFormatDescriptionColorPrimaries_ITU_R_2020;
            extensions[(__bridge NSString*)kCMFormatDescriptionExtension_YCbCrMatrix] = (__bridge NSString*)kCMFormatDescriptionYCbCrMatrix_ITU_R_2020;
            if (du->hdrActive) {
                extensions[(__bridge NSString*)kCMFormatDescriptionExtension_TransferFunction] = (__bridge NSString*)kCMFormatDescriptionTransferFunction_SMPTE_ST_2084_PQ;
            }
            else {
                extensions[(__bridge NSString*)kCMFormatDescriptionExtension_TransferFunction] = (__bridge NSString*)kCMFormatDescriptionTransferFunction_ITU_R_2020;
            }
            break;
    }
    
    extensions[(__bridge NSString*)kCMFormatDescriptionExtension_FullRangeVideo] = @(NO);
    
    if (contentLightLevelInfo) {
        extensions[(__bridge NSString*)kCMFormatDescriptionExtension_ContentLightLevelInfo] = contentLightLevelInfo;
    }
    
    if (masteringDisplayColorVolume) {
        extensions[(__bridge NSString*)kCMFormatDescriptionExtension_MasteringDisplayColorVolume] = masteringDisplayColorVolume;
    }
    
    // Referenced the VP9 code in Chrome that performs a similar function
    // https://source.chromium.org/chromium/chromium/src/+/main:media/gpu/mac/vt_config_util.mm;drc=977dc02c431b4979e34c7792bc3d646f649dacb4;l=155
    extensions[(__bridge NSString*)kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms] =
        @{
            @"av1C" : [self getAv1CodecConfigurationBox:frameData],
        };
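    // Bit depth: 10-bit streams are signaled by Moonlight with VIDEO_FORMAT_MASK_10BIT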
    extensions[@"BitsPerComponent"] = @((videoFormat & VIDEO_FORMAT_MASK_10BIT) ? 10 : 8);
    
    return extensions;
}

// This function must free data for bufferType == BUFFER_TYPE_PICDATA
- (int)submitDecodeBuffer:(unsigned char *)data length:(int)length bufferType:(int)bufferType decodeUnit:(PDECODE_UNIT)du
{
    OSStatus status;
    
    // Construct a new format description object each time we receive an IDR frame
    if (du->frameType == FRAME_TYPE_IDR) {
        if (bufferType != BUFFER_TYPE_PICDATA) {
            if (bufferType == BUFFER_TYPE_VPS || bufferType == BUFFER_TYPE_SPS || bufferType == BUFFER_TYPE_PPS) {
                // Add new parameter set into the parameter set array
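                // Annex B start codes can be 3 bytes (00 00 01) or 4 bytes
                // (00 00 00 01); data[2] == 0x01 distinguishes the two so the
                // prefix can be stripped before storing the raw parameter set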
                int startLen = data[2] == 0x01 ? 3 : 4;
                [parameterSetBuffers addObject:[NSData dataWithBytes:&data[startLen] length:length - startLen]];
                
                // We must reconstruct the format description if we get new parameter sets
                if (formatDesc != NULL) {
                    CFRelease(formatDesc);
                    formatDesc = NULL;
                }
            }
            
            // Data is NOT to be freed here. It's a direct usage of the caller's buffer.
            
            // No frame data to submit for these NALUs
            return DR_OK;
        }
        else if (formatDesc == NULL) {
            // Create the new format description when we get the first picture data buffer of an IDR frame.
            // This is the only way we know that there is no more CSD for this frame.
            if (videoFormat & VIDEO_FORMAT_MASK_H264) {
                // Construct parameter set arrays for the format description
                size_t parameterSetCount = [parameterSetBuffers count];
                const uint8_t* parameterSetPointers[parameterSetCount];
                size_t parameterSetSizes[parameterSetCount];
                for (int i = 0; i < parameterSetCount; i++) {
                    NSData* parameterSet = parameterSetBuffers[i];
                    parameterSetPointers[i] = parameterSet.bytes;
                    parameterSetSizes[i] = parameterSet.length;
                }
                
                Log(LOG_I, @"Constructing new H264 format description");
                status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
                                                                             parameterSetCount,
                                                                             parameterSetPointers,
                                                                             parameterSetSizes,
                                                                             NAL_LENGTH_PREFIX_SIZE,
                                                                             &formatDesc);
                if (status != noErr) {
                    Log(LOG_E, @"Failed to create H264 format description: %d", (int)status);
                    formatDesc = NULL;
                }
                
                // Free parameter set buffers after submission
                [parameterSetBuffers removeAllObjects];
            }
            else if (videoFormat & VIDEO_FORMAT_MASK_H265) {
                // Construct parameter set arrays for the format description
                size_t parameterSetCount = [parameterSetBuffers count];
                const uint8_t* parameterSetPointers[parameterSetCount];
                size_t parameterSetSizes[parameterSetCount];
                for (int i = 0; i < parameterSetCount; i++) {
                    NSData* parameterSet = parameterSetBuffers[i];
                    parameterSetPointers[i] = parameterSet.bytes;
                    parameterSetSizes[i] = parameterSet.length;
                }
                
                Log(LOG_I, @"Constructing new HEVC format description");
                
                NSMutableDictionary* videoFormatParams = [[NSMutableDictionary alloc] init];
                
                if (contentLightLevelInfo) {
                    [videoFormatParams setObject:contentLightLevelInfo forKey:(__bridge NSString*)kCMFormatDescriptionExtension_ContentLightLevelInfo];
                }
                
                if (masteringDisplayColorVolume) {
                    [videoFormatParams setObject:masteringDisplayColorVolume forKey:(__bridge NSString*)kCMFormatDescriptionExtension_MasteringDisplayColorVolume];
                }
                
                status = CMVideoFormatDescriptionCreateFromHEVCParameterSets(kCFAllocatorDefault,
                                                                             parameterSetCount,
                                                                             parameterSetPointers,
                                                                             parameterSetSizes,
                                                                             NAL_LENGTH_PREFIX_SIZE,
                                                                             (__bridge CFDictionaryRef)videoFormatParams,
                                                                             &formatDesc);
                
                if (status != noErr) {
                    Log(LOG_E, @"Failed to create HEVC format description: %d", (int)status);
                    formatDesc = NULL;
                }
                
                // Free parameter set buffers after submission
                [parameterSetBuffers removeAllObjects];
            }
#if defined(__IPHONE_16_0) || defined(__TVOS_16_0)
            else if (videoFormat & VIDEO_FORMAT_MASK_AV1) {
                Log(LOG_I, @"Constructing new AV1 format description");
                
                // AV1 doesn't have a special format description function like H.264 and HEVC have, so we just use the generic one
                NSData* fullFrameData = [NSData dataWithBytesNoCopy:data length:length freeWhenDone:NO];
                NSDictionary* extensions = [self createAV1FormatExtensionsDictionaryForDU:du frameData:fullFrameData];
                status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault, kCMVideoCodecType_AV1,
                                                        videoWidth, videoHeight,
                                                        (__bridge CFDictionaryRef)extensions,
                                                        &formatDesc);
                if (status != noErr) {
                    Log(LOG_E, @"Failed to create AV1 format description: %d", (int)status);
                    formatDesc = NULL;
                }
            }
#endif
            else {
                // Unsupported codec!
                abort();
            }
        }
    }
    
    if (formatDesc == NULL) {
        // Can't decode if we haven't gotten our parameter sets yet
        free(data);
        return DR_NEED_IDR;
    }
    
    // Check for previous decoder errors before doing anything
    if (displayLayer.status == AVQueuedSampleBufferRenderingStatusFailed) {
        Log(LOG_E, @"Display layer rendering failed: %@", displayLayer.error);
        
        // Recreate the display layer. We are already on the main thread,
        // so this is safe to do right here.
        [self reinitializeDisplayLayer];
        
        // Request an IDR frame to initialize the new decoder
        free(data);
        return DR_NEED_IDR;
    }
    
    // Now we're decoding actual frame data here
    CMBlockBufferRef frameBlockBuffer;
    CMBlockBufferRef dataBlockBuffer;
    
    status = CMBlockBufferCreateWithMemoryBlock(NULL, data, length, kCFAllocatorDefault, NULL, 0, length, 0, &dataBlockBuffer);
    if (status != noErr) {
        Log(LOG_E, @"CMBlockBufferCreateWithMemoryBlock failed: %d", (int)status);
        free(data);
        return DR_NEED_IDR;
    }
    
    // From now on, CMBlockBuffer owns the data pointer and will free it when the buffer is released
    
    status = CMBlockBufferCreateEmpty(NULL, 0, 0, &frameBlockBuffer);
    if (status != noErr) {
        Log(LOG_E, @"CMBlockBufferCreateEmpty failed: %d", (int)status);
        CFRelease(dataBlockBuffer);
        return DR_NEED_IDR;
    }
    
    // H.264 and HEVC formats require NAL prefix fixups from Annex B to length-delimited
    if (videoFormat & (VIDEO_FORMAT_MASK_H264 | VIDEO_FORMAT_MASK_H265)) {
        int lastOffset = -1;
        for (int i = 0; i < length - NALU_START_PREFIX_SIZE; i++) {
            // Search for a NALU start code
            if (data[i] == 0 && data[i+1] == 0 && data[i+2] == 1) {
                // It's the start of a new NALU
                if (lastOffset != -1) {
                    // We've seen a start before this, so enqueue that NALU
                    [self updateAnnexBBufferForRange:frameBlockBuffer dataBlock:dataBlockBuffer offset:lastOffset length:i - lastOffset];
                }
                
                lastOffset = i;
            }
        }
        
        if (lastOffset != -1) {
            // Enqueue the remaining data
            [self updateAnnexBBufferForRange:frameBlockBuffer dataBlock:dataBlockBuffer offset:lastOffset length:length - lastOffset];
        }
    }
    else {
        // For formats that require no length-changing fixups, just append a reference to the raw data block
        status = CMBlockBufferAppendBufferReference(frameBlockBuffer, dataBlockBuffer, 0, length, 0);
        if (status != noErr) {
            Log(LOG_E, @"CMBlockBufferAppendBufferReference failed: %d", (int)status);
            CFRelease(dataBlockBuffer);
            CFRelease(frameBlockBuffer);
            return DR_NEED_IDR;
        }
    }
    
    CMSampleBufferRef sampleBuffer;
    
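    // Timing fields are {duration, PTS, DTS}: no duration, PTS taken from the
    // host's presentation timestamp in milliseconds (timescale 1000), no DTS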
    CMSampleTimingInfo sampleTiming = {kCMTimeInvalid, CMTimeMake(du->presentationTimeMs, 1000), kCMTimeInvalid};
    
    status = CMSampleBufferCreateReady(kCFAllocatorDefault,
                                       frameBlockBuffer,
                                       formatDesc, 1, 1,
                                       &sampleTiming, 0, NULL,
                                       &sampleBuffer);
    if (status != noErr) {
        Log(LOG_E, @"CMSampleBufferCreateReady failed: %d", (int)status);
        CFRelease(dataBlockBuffer);
        CFRelease(frameBlockBuffer);
        return DR_NEED_IDR;
    }
    
    // Enqueue the next frame
    [self->displayLayer enqueueSampleBuffer:sampleBuffer];
    
    if (du->frameType == FRAME_TYPE_IDR) {
        // Ensure the layer is visible now
        self->displayLayer.hidden = NO;
        
        // Tell our parent VC to hide the progress indicator
        [self->_callbacks videoContentShown];
    }
    
    // Release the buffers
    CFRelease(dataBlockBuffer);
    CFRelease(frameBlockBuffer);
    CFRelease(sampleBuffer);
    
    return DR_OK;
}

- (void)setHdrMode:(BOOL)enabled {
    SS_HDR_METADATA hdrMetadata;
    
    BOOL hasMetadata = enabled && LiGetHdrMetadata(&hdrMetadata);
    BOOL metadataChanged = NO;
    
    if (hasMetadata && hdrMetadata.displayPrimaries[0].x != 0 && hdrMetadata.maxDisplayLuminance != 0) {
        // This data is all in big-endian
        struct {
            vector_ushort2 primaries[3];
            vector_ushort2 white_point;
            uint32_t luminance_max;
            uint32_t luminance_min;
        } __attribute__((packed, aligned(4))) mdcv;
        
        // mdcv is in GBR order while SS_HDR_METADATA is in RGB order
        mdcv.primaries[0].x = __builtin_bswap16(hdrMetadata.displayPrimaries[1].x);
        mdcv.primaries[0].y = __builtin_bswap16(hdrMetadata.displayPrimaries[1].y);
        mdcv.primaries[1].x = __builtin_bswap16(hdrMetadata.displayPrimaries[2].x);
        mdcv.primaries[1].y = __builtin_bswap16(hdrMetadata.displayPrimaries[2].y);
        mdcv.primaries[2].x = __builtin_bswap16(hdrMetadata.displayPrimaries[0].x);
        mdcv.primaries[2].y = __builtin_bswap16(hdrMetadata.displayPrimaries[0].y);
        
        mdcv.white_point.x = __builtin_bswap16(hdrMetadata.whitePoint.x);
        mdcv.white_point.y = __builtin_bswap16(hdrMetadata.whitePoint.y);
        
        // These luminance values are in 10000ths of a nit
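        // Note the asymmetry below: maxDisplayLuminance arrives in whole nits
        // and is scaled by 10000, while minDisplayLuminance is passed through
        // because it already arrives in 0.0001-nit units (e.g. an assumed
        // 1000-nit mastering maximum becomes 10,000,000 here)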
        mdcv.luminance_max = __builtin_bswap32((uint32_t)hdrMetadata.maxDisplayLuminance * 10000);
        mdcv.luminance_min = __builtin_bswap32(hdrMetadata.minDisplayLuminance);
        
        NSData* newMdcv = [NSData dataWithBytes:&mdcv length:sizeof(mdcv)];
        if (masteringDisplayColorVolume == nil || ![newMdcv isEqualToData:masteringDisplayColorVolume]) {
            masteringDisplayColorVolume = newMdcv;
            metadataChanged = YES;
        }
    }
    else if (masteringDisplayColorVolume != nil) {
        masteringDisplayColorVolume = nil;
        metadataChanged = YES;
    }
    
    if (hasMetadata && hdrMetadata.maxContentLightLevel != 0 && hdrMetadata.maxFrameAverageLightLevel != 0) {
        // This data is all in big-endian
        struct {
            uint16_t max_content_light_level;
            uint16_t max_frame_average_light_level;
        } __attribute__((packed, aligned(2))) cll;
        
        cll.max_content_light_level = __builtin_bswap16(hdrMetadata.maxContentLightLevel);
        cll.max_frame_average_light_level = __builtin_bswap16(hdrMetadata.maxFrameAverageLightLevel);
        
        NSData* newCll = [NSData dataWithBytes:&cll length:sizeof(cll)];
        if (contentLightLevelInfo == nil || ![newCll isEqualToData:contentLightLevelInfo]) {
            contentLightLevelInfo = newCll;
            metadataChanged = YES;
        }
    }
    else if (contentLightLevelInfo != nil) {
        contentLightLevelInfo = nil;
        metadataChanged = YES;
    }
    
    // If the metadata changed, request an IDR frame to re-create the CMVideoFormatDescription
    if (metadataChanged) {
        LiRequestIdrFrame();
    }
}

@end