forked from rambod-rahmani/ffmpeg-video-player
/**
*
* File: tutorial05.c
* This tutorial adds video-to-audio synchronization to the player coded in tutorial04-resampled.c.
*
* Compiled using
* gcc -o tutorial05 tutorial05.c -lavutil -lavformat -lavcodec -lswscale -lswresample -lz -lm `sdl2-config --cflags --libs`
* on Arch Linux.
*
* Please refer to previous tutorials for uncommented lines of code.
*
* Author: Rambod Rahmani <[email protected]>
* Created on 8/22/18.
*
**/
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <time.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/avstring.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <SDL2/SDL.h>
#include <SDL2/SDL_thread.h>
/**
* Prevents SDL from overriding main().
*/
#ifdef __MINGW32__
#undef main
#endif
/**
* SDL audio buffer size in samples.
*/
#define SDL_AUDIO_BUFFER_SIZE 1024
/**
* Maximum audio frame size, in bytes (1 second of 48 kHz 32-bit audio).
*/
#define MAX_AUDIO_FRAME_SIZE 192000
/**
* Audio packets queue maximum size.
*/
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
/**
* Video packets queue maximum size.
*/
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
/**
* Minimum AV sync threshold: sync correction is attempted only when the clock difference exceeds this value.
*/
#define AV_SYNC_THRESHOLD 0.01
/**
* Maximum AV sync threshold: no correction is attempted when the clock difference exceeds this value, as the clocks are considered too far out of sync.
*/
#define AV_NOSYNC_THRESHOLD 1.0
/**
* Custom SDL_Event type.
* Notifies that the next video frame has to be displayed.
*/
#define FF_REFRESH_EVENT (SDL_USEREVENT)
/**
* Custom SDL_Event type.
* Notifies that the program needs to quit.
*/
#define FF_QUIT_EVENT (SDL_USEREVENT + 1)
/**
* Video Frame queue size.
*/
#define VIDEO_PICTURE_QUEUE_SIZE 1
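/**
* Illustrative sketch (not called by the player) of how the two sync thresholds
* above are meant to be used: the per-frame delay is corrected only when the
* video clock drifts from the audio clock by more than a sync threshold, while
* drifts larger than AV_NOSYNC_THRESHOLD are left alone. The real logic lives in
* video_refresh_timer() further below; the example_ name here is hypothetical.
*/
static inline double example_adjust_frame_delay(double delay, double video_pts, double audio_clock)
{
    // difference between the pts of the current frame and the audio clock
    double diff = video_pts - audio_clock;

    // never use a sync threshold smaller than the nominal frame delay
    double sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;

    // only correct drifts that are still considered recoverable
    if (fabs(diff) < AV_NOSYNC_THRESHOLD)
    {
        if (diff <= -sync_threshold)
        {
            delay = 0;          // video is behind audio: show the next frame as soon as possible
        }
        else if (diff >= sync_threshold)
        {
            delay = 2 * delay;  // video is ahead of audio: hold the frame roughly twice as long
        }
    }

    return delay;
}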
/**
* Queue structure used to store AVPackets.
*/
typedef struct PacketQueue
{
AVPacketList * first_pkt;
AVPacketList * last_pkt;
int nb_packets;
int size;
SDL_mutex * mutex;
SDL_cond * cond;
} PacketQueue;
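/**
* Minimal sketch (not used by the player) of the producer/consumer pattern this
* queue enables: the consumer holds the mutex and sleeps on the condition
* variable until the producer signals that a packet has been appended. The real
* implementations are packet_queue_put() and packet_queue_get() further below;
* the example_ name is hypothetical.
*/
static inline int example_wait_for_packets(PacketQueue * q, const int * quit)
{
    int available = 0;

    SDL_LockMutex(q->mutex);

    while (!*quit && q->nb_packets == 0)
    {
        // SDL_CondWait() atomically releases the mutex while sleeping and
        // re-acquires it before returning
        SDL_CondWait(q->cond, q->mutex);
    }

    available = q->nb_packets;

    SDL_UnlockMutex(q->mutex);

    return available;
}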
/**
* Queue structure used to store processed video frames.
*
* Compared to the previous tutorial, the only thing that changes about
* queue_picture() is that we now save the pts value of each frame into the
* VideoPicture structure that we queue up. So we add a pts field to the struct
* and a single line of code when filling it.
*/
typedef struct VideoPicture
{
AVFrame * frame;
int width;
int height;
int allocated;
double pts;
} VideoPicture;
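/**
* Minimal sketch (not called by the player) of how a frame pts expressed in
* stream time-base units becomes the double, in seconds, stored in
* VideoPicture.pts. The same conversion is performed in video_thread() further
* below; the example_ name is hypothetical.
*/
static inline double example_pts_in_seconds(const AVStream * stream, int64_t pts)
{
    if (pts == AV_NOPTS_VALUE)
    {
        // unknown pts: the player falls back to extrapolating from its video clock
        return 0.0;
    }

    // time_base is a rational such as 1/90000; av_q2d() converts it to a double
    return pts * av_q2d(stream->time_base);
}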
/**
* Struct used to hold the format context, the indices of the audio and video stream,
* the corresponding AVStream objects, the audio and video codec information,
* the audio and video queues and buffers, the global quit flag and the filename of
* the movie.
*/
typedef struct VideoState
{
/**
* File I/O Context.
*/
AVFormatContext * pFormatCtx;
/**
* Audio Stream.
*/
int audioStream;
AVStream * audio_st;
AVCodecContext * audio_ctx;
PacketQueue audioq;
uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
unsigned int audio_buf_size;
unsigned int audio_buf_index;
AVFrame audio_frame;
AVPacket audio_pkt;
uint8_t * audio_pkt_data;
int audio_pkt_size;
double audio_clock;
int audio_hw_buf_size;
/**
* Video Stream.
*/
int videoStream;
AVStream * video_st;
AVCodecContext * video_ctx;
SDL_Texture * texture;
SDL_Renderer * renderer;
PacketQueue videoq;
struct SwsContext * sws_ctx;
double frame_timer;
double frame_last_pts;
double frame_last_delay;
double video_clock;
/**
* VideoPicture Queue.
*/
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size;
int pictq_rindex;
int pictq_windex;
SDL_mutex * pictq_mutex;
SDL_cond * pictq_cond;
/**
* Threads.
*/
SDL_Thread * decode_tid;
SDL_Thread * video_tid;
/**
* Input file name.
*/
char filename[1024];
/**
* Global quit flag.
*/
int quit;
/**
* Maximum number of frames to be decoded.
*/
long maxFramesToDecode;
int currentFrameIndex;
} VideoState;
/**
* Struct used to hold data fields used for audio resampling.
*/
typedef struct AudioResamplingState
{
SwrContext * swr_ctx;
int64_t in_channel_layout;
uint64_t out_channel_layout;
int out_nb_channels;
int out_linesize;
int in_nb_samples;
int64_t out_nb_samples;
int64_t max_out_nb_samples;
uint8_t ** resampled_data;
int resampled_data_size;
} AudioResamplingState;
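/**
* Minimal sketch (not called by the player) of how an SwrContext like the one
* held in AudioResamplingState is typically configured with the pre-5.1
* channel-layout API used throughout this file. The real setup happens in
* audio_resampling()/getAudioResampling() further below; the example_ name and
* the chosen output parameters are illustrative assumptions.
*/
static inline SwrContext * example_create_resampler(const AVCodecContext * audio_ctx)
{
    SwrContext * swr_ctx = swr_alloc_set_opts(
        NULL,                           // allocate a new context
        AV_CH_LAYOUT_STEREO,            // output channel layout (assumption)
        AV_SAMPLE_FMT_S16,              // output sample format matching AUDIO_S16SYS
        audio_ctx->sample_rate,         // keep the input sample rate
        audio_ctx->channel_layout,      // input channel layout
        audio_ctx->sample_fmt,          // input sample format
        audio_ctx->sample_rate,
        0,                              // log offset
        NULL                            // log context
    );

    if (swr_ctx == NULL || swr_init(swr_ctx) < 0)
    {
        printf("Could not initialize the resampling context.\n");
        swr_free(&swr_ctx);
        return NULL;
    }

    return swr_ctx;
}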
/**
* Global SDL_Window reference.
*/
SDL_Window * screen;
/**
* Global SDL_Surface mutex reference.
*/
SDL_mutex * screen_mutex;
/**
* Global VideoState reference.
*/
VideoState * global_video_state;
/**
* Methods declaration.
*/
void printHelpMenu();
int decode_thread(void * arg);
int stream_component_open(
VideoState * videoState,
int stream_index
);
void alloc_picture(void * userdata);
int queue_picture(
VideoState * videoState,
AVFrame * pFrame,
double pts
);
int video_thread(void * arg);
static int64_t guess_correct_pts(
AVCodecContext * ctx,
int64_t reordered_pts,
int64_t dts
);
double synchronize_video(
VideoState * videoState,
AVFrame * src_frame,
double pts
);
void video_refresh_timer(void * userdata);
double get_audio_clock(VideoState * videoState);
static void schedule_refresh(
VideoState * videoState,
int delay
);
static Uint32 sdl_refresh_timer_cb(
Uint32 interval,
void * param
);
void video_display(VideoState * videoState);
void packet_queue_init(PacketQueue * q);
int packet_queue_put(
PacketQueue * queue,
AVPacket * packet
);
static int packet_queue_get(
PacketQueue * queue,
AVPacket * packet,
int blocking
);
void audio_callback(
void * userdata,
Uint8 * stream,
int len
);
int audio_decode_frame(
VideoState * videoState,
uint8_t * audio_buf,
int buf_size,
double * pts_ptr
);
static int audio_resampling(
VideoState * videoState,
AVFrame * decoded_audio_frame,
enum AVSampleFormat out_sample_fmt,
uint8_t * out_buf
);
AudioResamplingState * getAudioResampling(uint64_t channel_layout);
/**
* Entry point.
*
* @param argc command line arguments counter.
* @param argv command line arguments.
*
* @return execution exit code.
*/
int main(int argc, char * argv[])
{
// if the given number of command line arguments is wrong
if (argc != 3)
{
// print help menu and exit
printHelpMenu();
return -1;
}
int ret = -1;
/**
* Initialize SDL.
* New API: this implementation does not use deprecated SDL functionalities.
*/
ret = SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER);
if (ret != 0)
{
printf("Could not initialize SDL - %s\n.", SDL_GetError());
return -1;
}
// the global VideoState reference will be set in decode_thread() to this pointer
VideoState * videoState = NULL;
// allocate memory for the VideoState and zero it out
videoState = av_mallocz(sizeof(VideoState));
if (!videoState)
{
printf("Could not allocate VideoState.\n");
return -1;
}
// copy the file name input by the user to the VideoState structure
av_strlcpy(videoState->filename, argv[1], sizeof(videoState->filename));
// parse max frames to decode input by the user
char * pEnd;
videoState->maxFramesToDecode = strtol(argv[2], &pEnd, 10);
// initialize locks for the display buffer (pictq)
videoState->pictq_mutex = SDL_CreateMutex();
videoState->pictq_cond = SDL_CreateCond();
// launch our threads by pushing an SDL_event of type FF_REFRESH_EVENT
schedule_refresh(videoState, 100);
// start the decoding thread to read data from the AVFormatContext
videoState->decode_tid = SDL_CreateThread(decode_thread, "Decoding Thread", videoState);
// check the decode thread was correctly started
if(!videoState->decode_tid)
{
printf("Could not start decoding SDL_Thread: %s.\n", SDL_GetError());
// free allocated memory before exiting
av_free(videoState);
return -1;
}
// infinite loop waiting for fired events
SDL_Event event;
for(;;)
{
// wait indefinitely for the next available event
ret = SDL_WaitEvent(&event);
if (ret == 0)
{
printf("SDL_WaitEvent failed: %s.\n", SDL_GetError());
}
// switch on the retrieved event type
switch(event.type)
{
case FF_QUIT_EVENT:
case SDL_QUIT:
{
videoState->quit = 1;
SDL_Quit();
}
break;
case FF_REFRESH_EVENT:
{
video_refresh_timer(event.user.data1);
}
break;
default:
{
// nothing to do
}
break;
}
// check global quit flag
if (videoState->quit)
{
// exit for loop
break;
}
}
// clean up memory
av_free(videoState);
return 0;
}
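/**
* Illustrative sketch (with hypothetical example_ names) of the refresh
* mechanism driven by schedule_refresh() in main() above: an SDL timer is
* armed, its callback pushes an FF_REFRESH_EVENT carrying the VideoState
* pointer, and the event loop in main() reacts by calling video_refresh_timer().
* The real schedule_refresh() and sdl_refresh_timer_cb() are defined further below.
*/
static inline Uint32 example_refresh_timer_cb(Uint32 interval, void * param)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = param;   // the VideoState pointer travels with the event

    SDL_PushEvent(&event);

    return 0;                   // returning 0 cancels the timer: it fires only once
}

static inline void example_schedule_refresh(VideoState * videoState, int delay_ms)
{
    SDL_AddTimer(delay_ms, example_refresh_timer_cb, videoState);
}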
/**
* Print help menu containing usage information.
*/
void printHelpMenu()
{
printf("Invalid arguments.\n\n");
printf("Usage: ./tutorial05 <filename> <max-frames-to-decode>\n\n");
printf("e.g: ./tutorial05 /home/rambodrahmani/Videos/video.mp4 200\n");
}
/**
* This function is used as callback for the SDL_Thread.
*
* Opens Audio and Video Streams. If all codecs are retrieved correctly, starts
* an infinite loop to read AVPackets from the global VideoState AVFormatContext.
* Based on their stream index, each packet is placed in the appropriate queue.
*
* @param arg the data pointer passed to the SDL_Thread callback function.
*
* @return < 0 in case of error, 0 otherwise.
*/
int decode_thread(void * arg)
{
// retrieve global VideoState reference
VideoState * videoState = (VideoState *)arg;
int ret = -1;
// file I/O context: demuxers read a media file and split it into chunks of data (packets)
AVFormatContext * pFormatCtx = NULL;
ret = avformat_open_input(&pFormatCtx, videoState->filename, NULL, NULL);
if (ret < 0)
{
printf("Could not open file %s.\n", videoState->filename);
return -1;
}
// reset stream indexes
videoState->videoStream = -1;
videoState->audioStream = -1;
// set global VideoState reference
global_video_state = videoState;
// set the AVFormatContext for the global VideoState reference
videoState->pFormatCtx = pFormatCtx;
// read packets of the media file to get stream information
ret = avformat_find_stream_info(pFormatCtx, NULL);
if (ret < 0)
{
printf("Could not find stream information: %s.\n", videoState->filename);
return -1;
}
// dump information about file onto standard error
av_dump_format(pFormatCtx, 0, videoState->filename, 0);
// video and audio stream indexes
int videoStream = -1;
int audioStream = -1;
// loop through the streams that have been found
for (int i = 0; i < pFormatCtx->nb_streams; i++)
{
// look for the video stream
if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0)
{
videoStream = i;
}
// look for the audio stream
if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
{
audioStream = i;
}
}
// return with error in case no video stream was found
if (videoStream == -1)
{
printf("Could not find video stream.\n");
goto fail;
}
else
{
// open video stream component codec
ret = stream_component_open(videoState, videoStream);
// check video codec was opened correctly
if (ret < 0)
{
printf("Could not open video codec.\n");
goto fail;
}
}
// return with error in case no audio stream was found
if (audioStream == -1)
{
printf("Could not find audio stream.\n");
goto fail;
}
else
{
// open audio stream component codec
ret = stream_component_open(videoState, audioStream);
// check audio codec was opened correctly
if (ret < 0)
{
printf("Could not open audio codec.\n");
goto fail;
}
}
// check both the audio and video codecs were correctly retrieved
if (videoState->videoStream < 0 || videoState->audioStream < 0)
{
printf("Could not open codecs: %s.\n", videoState->filename);
goto fail;
}
// alloc the AVPacket used to read the media file
AVPacket * packet = av_packet_alloc();
if (packet == NULL)
{
printf("Could not alloc packet.\n");
return -1;
}
// main decode loop: read in a packet and put it on the right queue
for (;;)
{
// check global quit flag
if (videoState->quit)
{
break;
}
// check audio and video packets queues size
if (videoState->audioq.size > MAX_AUDIOQ_SIZE || videoState->videoq.size > MAX_VIDEOQ_SIZE)
{
// wait for audio and video queues to decrease size
SDL_Delay(10);
continue;
}
// read data from the AVFormatContext by repeatedly calling av_read_frame()
ret = av_read_frame(videoState->pFormatCtx, packet);
if (ret < 0)
{
if (ret == AVERROR_EOF)
{
// media EOF reached, quit
videoState->quit = 1;
break;
}
else if (videoState->pFormatCtx->pb->error == 0)
{
// no read error: wait a little and try reading again
SDL_Delay(10);
continue;
}
else
{
// exit for loop in case of error
break;
}
}
// put the packet in the appropriate queue
if (packet->stream_index == videoState->videoStream)
{
packet_queue_put(&videoState->videoq, packet);
}
else if (packet->stream_index == videoState->audioStream)
{
packet_queue_put(&videoState->audioq, packet);
}
else
{
// otherwise free the memory
av_packet_unref(packet);
}
}
// wait for the rest of the program to end
while (!videoState->quit)
{
SDL_Delay(100);
}
// close the opened input AVFormatContext
avformat_close_input(&pFormatCtx);
// normal termination: skip the failure label below
ret = 0;
goto quit;
// in case of failure, notify the main thread and return with error
fail:
ret = -1;
quit:
{
// create an SDL_Event of type FF_QUIT_EVENT so that the main event loop wakes up
SDL_Event event;
event.type = FF_QUIT_EVENT;
event.user.data1 = videoState;
// push the event to the events queue
SDL_PushEvent(&event);
}
return ret;
}
/**
* Retrieves the AVCodec and initializes the AVCodecContext for the given AVStream
* index. In case of AVMEDIA_TYPE_AUDIO codec type, it sets the desired audio specs,
* opens the audio device and starts playing.
*
* @param videoState the global VideoState reference used to save info
* related to the media being played.
* @param stream_index the stream index obtained from the AVFormatContext.
*
* @return < 0 in case of error, 0 otherwise.
*/
int stream_component_open(VideoState * videoState, int stream_index)
{
// retrieve file I/O context
AVFormatContext * pFormatCtx = videoState->pFormatCtx;
// check the given stream index is valid
if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams)
{
printf("Invalid stream index.");
return -1;
}
// retrieve codec for the given stream index
AVCodec * codec = NULL;
codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codecpar->codec_id);
if (codec == NULL)
{
printf("Unsupported codec.\n");
return -1;
}
// retrieve codec context
AVCodecContext * codecCtx = NULL;
codecCtx = avcodec_alloc_context3(codec);
int ret = avcodec_parameters_to_context(codecCtx, pFormatCtx->streams[stream_index]->codecpar);
if (ret != 0)
{
printf("Could not copy codec context.\n");
return -1;
}
if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO)
{
// desired and obtained audio specs references
SDL_AudioSpec wanted_specs;
SDL_AudioSpec specs;
// Set audio settings from codec info
wanted_specs.freq = codecCtx->sample_rate;
wanted_specs.format = AUDIO_S16SYS;
wanted_specs.channels = codecCtx->channels;
wanted_specs.silence = 0;
wanted_specs.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_specs.callback = audio_callback;
wanted_specs.userdata = videoState;
/* Deprecated, please refer to tutorial04-resampled.c for the new API */
// open audio device
ret = SDL_OpenAudio(&wanted_specs, &specs);
// check audio device was correctly opened
if (ret < 0)
{
printf("SDL_OpenAudio: %s.\n", SDL_GetError());
return -1;
}
}
// initialize the AVCodecContext to use the given AVCodec
if (avcodec_open2(codecCtx, codec, NULL) < 0)
{
printf("Unsupported codec.\n");
return -1;
}
// set up the global VideoState based on the type of the codec obtained for
// the given stream index
switch (codecCtx->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
{
// set VideoState audio related fields
videoState->audioStream = stream_index;
videoState->audio_st = pFormatCtx->streams[stream_index];
videoState->audio_ctx = codecCtx;
videoState->audio_buf_size = 0;
videoState->audio_buf_index = 0;
// zero out the block of memory pointed by videoState->audio_pkt
memset(&videoState->audio_pkt, 0, sizeof(videoState->audio_pkt));
// init audio packet queue
packet_queue_init(&videoState->audioq);
// start playing audio on the first audio device
SDL_PauseAudio(0);
}
break;
case AVMEDIA_TYPE_VIDEO:
{
// set VideoState video related fields
videoState->videoStream = stream_index;
videoState->video_st = pFormatCtx->streams[stream_index];
videoState->video_ctx = codecCtx;
// don't forget to initialize the frame timer and the initial previous
// frame delay: av_gettime() returns microseconds, so divide by 1e6 to get seconds
videoState->frame_timer = (double)av_gettime() / 1000000.0;
videoState->frame_last_delay = 40e-3;
// init video packet queue
packet_queue_init(&videoState->videoq);
// start video thread
videoState->video_tid = SDL_CreateThread(video_thread, "Video Thread", videoState);
// set up the VideoState SWSContext to convert the image data to YUV420
videoState->sws_ctx = sws_getContext(videoState->video_ctx->width,
videoState->video_ctx->height,
videoState->video_ctx->pix_fmt,
videoState->video_ctx->width,
videoState->video_ctx->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// create a window with the specified position, dimensions, and flags.
screen = SDL_CreateWindow(
"FFmpeg SDL Video Player",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
codecCtx->width/2,
codecCtx->height/2,
SDL_WINDOW_OPENGL | SDL_WINDOW_ALLOW_HIGHDPI
);
// check window was correctly created
if (!screen)
{
printf("SDL: could not create window - exiting.\n");
return -1;
}
// enable vsync: synchronize buffer swaps with the display refresh
SDL_GL_SetSwapInterval(1);
// initialize global SDL_Surface mutex reference
screen_mutex = SDL_CreateMutex();
// create a 2D rendering context for the SDL_Window
videoState->renderer = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC | SDL_RENDERER_TARGETTEXTURE);
// create a texture for a rendering context
videoState->texture = SDL_CreateTexture(
videoState->renderer,
SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING,
videoState->video_ctx->width,
videoState->video_ctx->height
);
}
break;
default:
{
// nothing to do
}
break;
}
return 0;
}
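/**
* Minimal sketch (not used here) of the non-deprecated SDL2 audio setup that the
* deprecation note in stream_component_open() refers to (see
* tutorial04-resampled.c): SDL_OpenAudioDevice() and SDL_PauseAudioDevice()
* replace SDL_OpenAudio()/SDL_PauseAudio(). The example_ name is hypothetical.
*/
static inline SDL_AudioDeviceID example_open_audio_device(SDL_AudioSpec * wanted_specs, SDL_AudioSpec * obtained_specs)
{
    // open the default playback device (NULL), allowing SDL to adjust the format
    SDL_AudioDeviceID audio_device_id = SDL_OpenAudioDevice(
        NULL,
        0,
        wanted_specs,
        obtained_specs,
        SDL_AUDIO_ALLOW_FORMAT_CHANGE
    );

    if (audio_device_id == 0)
    {
        printf("SDL_OpenAudioDevice: %s.\n", SDL_GetError());
        return 0;
    }

    // unpause the device so that the audio callback starts being invoked
    SDL_PauseAudioDevice(audio_device_id, 0);

    return audio_device_id;
}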
/**
* Allocates a new AVFrame, and its image data buffer, for the VideoPicture
* struct currently pointed to by the write index of the global VideoState queue.
* The remaining VideoPicture struct fields are also updated.
*
* @param userdata global VideoState reference.
*/
void alloc_picture(void * userdata)
{
// retrieve global VideoState reference.
VideoState * videoState = (VideoState *)userdata;
// retrieve the VideoPicture pointed by the queue write index
VideoPicture * videoPicture;
videoPicture = &videoState->pictq[videoState->pictq_windex];
// check if an AVFrame is already allocated for this queue slot
if (videoPicture->frame)
{
// we already have an AVFrame allocated, free its memory;
// av_frame_free() also resets the pointer to NULL
av_frame_free(&videoPicture->frame);
}
// lock global screen mutex
SDL_LockMutex(screen_mutex);
// get the size in bytes required to store an image with the given parameters
int numBytes;
numBytes = av_image_get_buffer_size(
AV_PIX_FMT_YUV420P,
videoState->video_ctx->width,
videoState->video_ctx->height,
32
);
// allocate image data buffer
uint8_t * buffer = NULL;
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
// alloc the AVFrame later used to contain the scaled frame
videoPicture->frame = av_frame_alloc();
if (videoPicture->frame == NULL)
{
printf("Could not allocate frame.\n");
return;
}
// The fields of the given image are filled in by using the buffer which points to the image data buffer.
av_image_fill_arrays(
videoPicture->frame->data,
videoPicture->frame->linesize,
buffer,
AV_PIX_FMT_YUV420P,
videoState->video_ctx->width,
videoState->video_ctx->height,
32
);
// unlock global screen mutex
SDL_UnlockMutex(screen_mutex);
// update VideoPicture struct fields
videoPicture->width = videoState->video_ctx->width;
videoPicture->height = videoState->video_ctx->height;
videoPicture->allocated = 1;
}
/**
* Waits for space in the VideoPicture queue. Allocates a new AVFrame in case
* it is not already allocated or has a different width/height. Converts the given
* decoded AVFrame to a YUV420P picture supported by SDL and writes it in the
* VideoPicture queue together with its presentation timestamp.
*
* @param videoState global VideoState reference.
* @param pFrame AVFrame to be inserted in the VideoState->pictq.
* @param pts presentation timestamp of the decoded frame, in seconds.
*
* @return < 0 in case the global quit flag is set, 0 otherwise.
*/
int queue_picture(VideoState * videoState, AVFrame * pFrame, double pts)
{
// lock VideoState->pictq mutex
SDL_LockMutex(videoState->pictq_mutex);
// wait until we have space for a new pic in VideoState->pictq
while (videoState->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !videoState->quit)
{
SDL_CondWait(videoState->pictq_cond, videoState->pictq_mutex);
}
// unlock VideoState->pictq mutex
SDL_UnlockMutex(videoState->pictq_mutex);
// check global quit flag
if (videoState->quit)
{
return -1;
}
// retrieve video picture using the queue write index
VideoPicture * videoPicture;
videoPicture = &videoState->pictq[videoState->pictq_windex];
// if the VideoPicture AVFrame is not allocated or has a different width/height
if (!videoPicture->frame ||
videoPicture->width != videoState->video_ctx->width ||
videoPicture->height != videoState->video_ctx->height)
{
// mark the VideoPicture as not allocated
videoPicture->allocated = 0;
// allocate a new AVFrame (and image buffer) for the VideoPicture struct
alloc_picture(videoState);
// check global quit flag
if(videoState->quit)
{
return -1;
}
}
// check the new AVFrame was correctly allocated
if (videoPicture->frame)
{
/**
* So now we've got pictures lining up onto our picture queue with proper
* PTS values.
*/
videoPicture->pts = pts;
// set VideoPicture AVFrame info using the last decoded frame
videoPicture->frame->pict_type = pFrame->pict_type;
videoPicture->frame->pts = pFrame->pts;
videoPicture->frame->pkt_dts = pFrame->pkt_dts;
videoPicture->frame->key_frame = pFrame->key_frame;
videoPicture->frame->coded_picture_number = pFrame->coded_picture_number;
videoPicture->frame->display_picture_number = pFrame->display_picture_number;
videoPicture->frame->width = pFrame->width;
videoPicture->frame->height = pFrame->height;
// scale the image in pFrame->data and put the resulting scaled image in pict->data
sws_scale(
videoState->sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
videoState->video_ctx->height,
videoPicture->frame->data,
videoPicture->frame->linesize
);
// update VideoPicture queue write index
++videoState->pictq_windex;
// if the write index has reached the VideoPicture queue size
if(videoState->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
{
// set it to 0
videoState->pictq_windex = 0;
}
// lock VideoPicture queue
SDL_LockMutex(videoState->pictq_mutex);
// increase VideoPicture queue size
videoState->pictq_size++;
// unlock VideoPicture queue
SDL_UnlockMutex(videoState->pictq_mutex);
}
return 0;
}
/**
* This function is used as callback for the SDL_Thread.
*
* This thread reads in packets from the video queue, packet_queue_get(), decodes