mirror of
https://github.com/moonlight-stream/moonlight-ios.git
synced 2025-07-23 12:44:19 +00:00
Implement code for new video decoder (untested) based on http://stackoverflow.com/questions/25980070/how-to-use-avsamplebufferdisplaylayer-in-ios-8-for-rtp-h264-streams-with-gstream
This commit is contained in:
parent
4c3bc16593
commit
9b6865a53b
//
//  Connection.h
//

#import <Foundation/Foundation.h>

#import "VideoDecoderRenderer.h"

// NSOperation that drives the streaming connection. Decoded H.264 units are
// handed off to the supplied VideoDecoderRenderer.
@interface Connection : NSOperation <NSStreamDelegate>

// Initializes a connection to the given host.
//   ipaddr:   host address as a packed value (e.g. the result of inet_addr()).
//   width:    requested stream width in pixels.
//   height:   requested stream height in pixels.
//   renderer: receives NALUs via -submitDecodeBuffer:length:.
-(id) initWithHost:(int)ipaddr width:(int)width height:(int)height renderer:(VideoDecoderRenderer*)renderer;

// NSOperation entry point.
-(void) main;

@end
@ -7,6 +7,7 @@
|
|||||||
//
|
//
|
||||||
|
|
||||||
#import "Connection.h"
|
#import "Connection.h"
|
||||||
|
|
||||||
#import <AudioUnit/AudioUnit.h>
|
#import <AudioUnit/AudioUnit.h>
|
||||||
#import <AVFoundation/AVFoundation.h>
|
#import <AVFoundation/AVFoundation.h>
|
||||||
|
|
||||||
@ -29,10 +30,9 @@ static OpusDecoder *opusDecoder;
|
|||||||
|
|
||||||
static short* decodedPcmBuffer;
|
static short* decodedPcmBuffer;
|
||||||
static int filledPcmBuffer;
|
static int filledPcmBuffer;
|
||||||
NSLock* audioRendererBlock;
|
static AudioComponentInstance audioUnit;
|
||||||
AudioComponentInstance audioUnit;
|
static bool started = false;
|
||||||
bool started = false;
|
static VideoDecoderRenderer* renderer;
|
||||||
|
|
||||||
|
|
||||||
void DrSetup(int width, int height, int fps, void* context, int drFlags)
|
void DrSetup(int width, int height, int fps, void* context, int drFlags)
|
||||||
{
|
{
|
||||||
@ -52,7 +52,7 @@ void DrSubmitDecodeUnit(PDECODE_UNIT decodeUnit)
|
|||||||
entry = entry->next;
|
entry = entry->next;
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: Submit data to decoder
|
[renderer submitDecodeBuffer:data length:decodeUnit->fullLength];
|
||||||
|
|
||||||
free(data);
|
free(data);
|
||||||
}
|
}
|
||||||
@ -125,7 +125,7 @@ void ArDecodeAndPlaySample(char* sampleData, int sampleLength)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-(id) initWithHost:(int)ipaddr width:(int)width height:(int)height
|
-(id) initWithHost:(int)ipaddr width:(int)width height:(int)height renderer:(VideoDecoderRenderer*)renderer
|
||||||
{
|
{
|
||||||
self = [super init];
|
self = [super init];
|
||||||
host = ipaddr;
|
host = ipaddr;
|
||||||
@ -237,7 +237,6 @@ static OSStatus playbackCallback(void *inRefCon,
|
|||||||
filledPcmBuffer -= min;
|
filledPcmBuffer -= min;
|
||||||
}
|
}
|
||||||
|
|
||||||
//[audioRendererBlock unlock];
|
|
||||||
return noErr;
|
return noErr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -42,12 +42,14 @@
|
|||||||
// Repositions and resizes the view.
|
// Repositions and resizes the view.
|
||||||
CGRect contentRect = CGRectMake(0,0, self.view.frame.size.width, self.view.frame.size.height);
|
CGRect contentRect = CGRectMake(0,0, self.view.frame.size.width, self.view.frame.size.height);
|
||||||
streamView.bounds = contentRect;
|
streamView.bounds = contentRect;
|
||||||
|
|
||||||
|
VideoDecoderRenderer* renderer = [[VideoDecoderRenderer alloc]init];
|
||||||
|
|
||||||
Connection* conn = [[Connection alloc] initWithHost:inet_addr([[ConnectionHandler resolveHost:[NSString stringWithUTF8String:[MainFrameViewController getHostAddr]]] UTF8String]) width:1280 height:720];
|
Connection* conn = [[Connection alloc] initWithHost:inet_addr([[ConnectionHandler resolveHost:[NSString stringWithUTF8String:[MainFrameViewController getHostAddr]]] UTF8String]) width:1280 height:720
|
||||||
|
renderer: renderer];
|
||||||
|
|
||||||
NSOperationQueue* opQueue = [[NSOperationQueue alloc] init];
|
NSOperationQueue* opQueue = [[NSOperationQueue alloc] init];
|
||||||
[opQueue addOperation:conn];
|
[opQueue addOperation:conn];
|
||||||
[opQueue addOperation:[[VideoDecoderRenderer alloc]initWithTarget:streamView]];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
- (void)didReceiveMemoryWarning
|
- (void)didReceiveMemoryWarning
|
||||||
|
//
//  VideoDecoderRenderer.h
//

#import <Foundation/Foundation.h>

@import AVFoundation;

// Accepts annex-B H.264 NALUs and displays decoded frames via an
// AVSampleBufferDisplayLayer.
@interface VideoDecoderRenderer : NSObject

- (id)init;

// Submits one NALU for decode/display.
//   data:   pointer to the NALU bytes, beginning with the 4-byte
//           00 00 00 01 start prefix.
//   length: total byte count, including the start prefix.
- (void)submitDecodeBuffer:(unsigned char *)data length:(int)length;

@end
//
//  VideoDecoderRenderer.m
//

#import "VideoDecoderRenderer.h"

@implementation VideoDecoderRenderer {
    AVSampleBufferDisplayLayer* displayLayer;
    Boolean waitingForSps, waitingForPpsA, waitingForPpsB;

    NSData *spsData, *ppsDataA, *ppsDataB;
    CMVideoFormatDescriptionRef formatDesc;
}

- (id)init
{
    self = [super init];

    displayLayer = [[AVSampleBufferDisplayLayer alloc] init];
    displayLayer.bounds = CGRectMake(0, 0, 300, 300);
    displayLayer.backgroundColor = [UIColor blackColor].CGColor;
    displayLayer.position = CGPointMake(500, 500);

    // We need some parameter sets before we can properly start decoding frames
    waitingForSps = true;
    waitingForPpsA = true;
    waitingForPpsB = true;

    return self;
}

// Size of the annex-B 00 00 00 01 start prefix preceding every NALU.
#define ES_START_PREFIX_SIZE 4
// Offset of the NALU payload (start prefix + 1-byte NALU header).
#define ES_DATA_OFFSET 5

- (void)submitDecodeBuffer:(unsigned char *)data length:(int)length
{
    // Low 5 bits of the NALU header byte are the NAL unit type.
    unsigned char nalType = data[ES_START_PREFIX_SIZE] & 0x1F;
    OSStatus status;

    if (formatDesc == NULL && (nalType == 0x7 || nalType == 0x8)) {
        if (waitingForSps && nalType == 0x7) {
            spsData = [NSData dataWithBytes:&data[ES_DATA_OFFSET] length:length - ES_DATA_OFFSET];
            waitingForSps = false;
        }
        // Nvidia's stream has 2 PPS NALUs so we'll wait for both of them
        else if ((waitingForPpsA || waitingForPpsB) && nalType == 0x8) {
            // Read the NALU's PPS index to figure out which PPS this is
            if (data[ES_DATA_OFFSET] == 0) {
                if (waitingForPpsA) {
                    ppsDataA = [NSData dataWithBytes:&data[ES_DATA_OFFSET] length:length - ES_DATA_OFFSET];
                    waitingForPpsA = false;
                }
            }
            else if (data[ES_DATA_OFFSET] == 1) {
                if (waitingForPpsB) {
                    // FIX: this previously stored into ppsDataA, leaving
                    // ppsDataB nil while waitingForPpsB was cleared, so the
                    // format description below was built from a NULL pointer.
                    ppsDataB = [NSData dataWithBytes:&data[ES_DATA_OFFSET] length:length - ES_DATA_OFFSET];
                    waitingForPpsB = false;
                }
            }
        }

        // See if we've got all the parameter sets we need
        if (!waitingForSps && !waitingForPpsA && !waitingForPpsB) {
            const uint8_t* const parameterSetPointers[] = { [spsData bytes], [ppsDataA bytes], [ppsDataB bytes] };
            const size_t parameterSetSizes[] = { [spsData length], [ppsDataA length], [ppsDataB length] };

            status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault,
                                                                         3, /* count of parameter sets */
                                                                         parameterSetPointers,
                                                                         parameterSetSizes,
                                                                         4 /* size of length prefix */,
                                                                         &formatDesc);
            if (status != noErr) {
                NSLog(@"Failed to create format description: %d", (int)status);
                formatDesc = NULL;
                return;
            }
        }

        // No frame data to submit for these NALUs
        return;
    }

    if (formatDesc == NULL) {
        // Can't decode if we haven't gotten our parameter sets yet
        return;
    }

    // Now we're decoding actual frame data here.
    //
    // FIX: the block buffer previously wrapped the caller's memory with
    // kCFAllocatorNull (no copy), but the caller frees `data` immediately
    // after this method returns while the sample buffer is enqueued
    // asynchronously — a use-after-free. Allocate an owned block and copy
    // the data into it instead.
    CMBlockBufferRef blockBuffer;
    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, NULL, length,
                                                kCFAllocatorDefault, NULL, 0, length,
                                                kCMBlockBufferAssureMemoryNowFlag, &blockBuffer);
    if (status != noErr) {
        NSLog(@"CMBlockBufferCreateWithMemoryBlock failed: %d", (int)status);
        return;
    }

    // Compute the new length prefix to replace the 00 00 00 01
    const int payloadLength = length - ES_START_PREFIX_SIZE;
    const uint8_t lengthBytes[] = {(uint8_t)(payloadLength >> 24), (uint8_t)(payloadLength >> 16),
                                   (uint8_t)(payloadLength >> 8), (uint8_t)payloadLength};
    status = CMBlockBufferReplaceDataBytes(lengthBytes, blockBuffer, 0, ES_START_PREFIX_SIZE);
    if (status != noErr) {
        NSLog(@"CMBlockBufferReplaceDataBytes failed: %d", (int)status);
        CFRelease(blockBuffer);
        return;
    }

    // Copy the NALU payload (header + data, minus the start prefix) into the
    // owned block right after the length prefix.
    status = CMBlockBufferReplaceDataBytes(&data[ES_START_PREFIX_SIZE], blockBuffer,
                                           ES_START_PREFIX_SIZE, payloadLength);
    if (status != noErr) {
        NSLog(@"CMBlockBufferReplaceDataBytes failed: %d", (int)status);
        CFRelease(blockBuffer);
        return;
    }

    CMSampleBufferRef sampleBuffer;
    const size_t sampleSizeArray[] = {length};

    status = CMSampleBufferCreate(kCFAllocatorDefault,
                                  blockBuffer, true, NULL,
                                  NULL, formatDesc, 1, 0,
                                  NULL, 1, sampleSizeArray,
                                  &sampleBuffer);
    // The sample buffer retains the block buffer; drop our reference either way.
    CFRelease(blockBuffer);
    if (status != noErr) {
        NSLog(@"CMSampleBufferCreate failed: %d", (int)status);
        return;
    }

    CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, YES);
    CFMutableDictionaryRef dict = (CFMutableDictionaryRef)CFArrayGetValueAtIndex(attachments, 0);
    CFDictionarySetValue(dict, kCMSampleAttachmentKey_DisplayImmediately, kCFBooleanTrue);

    dispatch_async(dispatch_get_main_queue(),^{
        [displayLayer enqueueSampleBuffer:sampleBuffer];
        [displayLayer setNeedsDisplay];
        // FIX: ARC does not manage CF objects; release the +1 reference from
        // CMSampleBufferCreate once the layer has taken the buffer.
        CFRelease(sampleBuffer);
    });
}

@end
|
Loading…
x
Reference in New Issue
Block a user