Fix several "mixing declarations and code is incompatible with standards
before C99" warnings.
---
 libavdevice/avfoundation.m | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/libavdevice/avfoundation.m b/libavdevice/avfoundation.m
index c5a09c6563..17900d39d9 100644
--- a/libavdevice/avfoundation.m
+++ b/libavdevice/avfoundation.m
@@ -680,6 +680,7 @@ static int get_audio_config(AVFormatContext *s)
 {
     AVFContext *ctx = (AVFContext*)s->priv_data;
     CMFormatDescriptionRef format_desc;
+    const AudioStreamBasicDescription *basic_desc;
     AVStream* stream = avformat_new_stream(s, NULL);
 
     if (!stream) {
@@ -698,7 +699,7 @@ static int get_audio_config(AVFormatContext *s)
     avpriv_set_pts_info(stream, 64, 1, avf_time_base);
 
     format_desc = CMSampleBufferGetFormatDescription(ctx->current_audio_frame);
-    const AudioStreamBasicDescription *basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);
+    basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);
 
     if (!basic_desc) {
         unlock_frames(ctx);
@@ -765,7 +766,9 @@ static int get_audio_config(AVFormatContext *s)
 
 static NSArray* getDevicesWithMediaType(AVMediaType mediaType) {
 #if ((TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000) || (TARGET_OS_OSX && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101500))
+    AVCaptureDeviceDiscoverySession *captureDeviceDiscoverySession;
     NSMutableArray *deviceTypes = nil;
+
     if (mediaType == AVMediaTypeVideo) {
         deviceTypes = [NSMutableArray arrayWithArray:@[AVCaptureDeviceTypeBuiltInWideAngleCamera]];
         #if (TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
@@ -810,7 +813,7 @@ static int get_audio_config(AVFormatContext *s)
         return nil;
     }
 
-    AVCaptureDeviceDiscoverySession *captureDeviceDiscoverySession =
+    captureDeviceDiscoverySession =
         [AVCaptureDeviceDiscoverySession
         discoverySessionWithDeviceTypes:deviceTypes
                               mediaType:mediaType
@@ -899,8 +902,9 @@ static int avf_read_header(AVFormatContext *s)
         } else if (ctx->video_device_index < ctx->num_video_devices + num_screens) {
 #if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
             CGDirectDisplayID screens[num_screens];
+            AVCaptureScreenInput* capture_screen_input;
             CGGetActiveDisplayList(num_screens, screens, &num_screens);
-            AVCaptureScreenInput* capture_screen_input = 
[[[AVCaptureScreenInput alloc] 
initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] 
autorelease];
+            capture_screen_input = [[[AVCaptureScreenInput alloc] 
initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] 
autorelease];
 
             if (ctx->framerate.num > 0) {
                 capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
@@ -954,8 +958,9 @@ static int avf_read_header(AVFormatContext *s)
             int idx;
             if(sscanf(ctx->video_filename, "Capture screen %d", &idx) && idx < num_screens) {
                 CGDirectDisplayID screens[num_screens];
+                AVCaptureScreenInput* capture_screen_input;
                 CGGetActiveDisplayList(num_screens, screens, &num_screens);
-                AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
+                capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
                 video_device = (AVCaptureDevice*) capture_screen_input;
                 ctx->video_device_index = ctx->num_video_devices + idx;
                 ctx->video_is_screen = 1;
@@ -1123,10 +1128,12 @@ static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
     do {
         CVImageBufferRef image_buffer;
         CMBlockBufferRef block_buffer;
+        CMItemCount count;
+        CMSampleTimingInfo timing_info;
         lock_frames(ctx);
 
         if (ctx->current_frame != nil) {
-            int status;
+            int status = 0;
             int length = 0;
 
             image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
@@ -1146,9 +1153,6 @@ static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 return AVERROR(EIO);
             }
 
-            CMItemCount count;
-            CMSampleTimingInfo timing_info;
-
             if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_frame, 1, &timing_info, &count) == noErr) {
                 AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                 pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
@@ -1160,7 +1164,6 @@ static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
             if (image_buffer) {
                 status = copy_cvpixelbuffer(s, image_buffer, pkt);
             } else {
-                status = 0;
                 OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                 if (ret != kCMBlockBufferNoErr) {
                     status = AVERROR(EIO);
@@ -1174,6 +1177,8 @@ static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 return status;
             }
         } else if (ctx->current_audio_frame != nil) {
+            CMItemCount count;
+            CMSampleTimingInfo timing_info;
             CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
             int block_buffer_size         = CMBlockBufferGetDataLength(block_buffer);
 
@@ -1192,9 +1197,6 @@ static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
                 return AVERROR(EIO);
             }
 
-            CMItemCount count;
-            CMSampleTimingInfo timing_info;
-
             if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_audio_frame, 1, &timing_info, &count) == noErr) {
                 AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                 pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
-- 
2.39.3 (Apple Git-146)


_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

Reply via email to