source: trunk/libavformat/utils.c@ 262

Last change on this file since 262 was 262, checked in by vladest, 18 years ago

FFMPEG update
First step to move from mplayer demuxers to libavformat
Fix FPS for libavformat
Fix crashing MOV files
Fix crashing Real Media files

File size: 94.0 KB
1/*
2 * Various utilities for ffmpeg system
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21#include "avformat.h"
22#include "allformats.h"
23#include "opt.h"
24
25#undef NDEBUG
26#include <assert.h>
27
28/**
29 * @file libavformat/utils.c
30 * Various utility functions for using ffmpeg library.
31 */
32
33static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
34static void av_frac_add(AVFrac *f, int64_t incr);
35static void av_frac_set(AVFrac *f, int64_t val);
36
37/** head of registered input format linked list. */
38AVInputFormat *first_iformat = NULL;
39/** head of registered output format linked list. */
40AVOutputFormat *first_oformat = NULL;
41
42void av_register_input_format(AVInputFormat *format)
43{
44 AVInputFormat **p;
45 p = &first_iformat;
46 while (*p != NULL) p = &(*p)->next;
47 *p = format;
48 format->next = NULL;
49}
50
51void av_register_output_format(AVOutputFormat *format)
52{
53 AVOutputFormat **p;
54 p = &first_oformat;
55 while (*p != NULL) p = &(*p)->next;
56 *p = format;
57 format->next = NULL;
58}
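/* A minimal sketch of how a demuxer defined elsewhere can hook itself into the
 * registered-format list above (e.g. from av_register_all()); "my_demuxer" is a
 * hypothetical AVInputFormat, not part of this file. */
#if 0
extern AVInputFormat my_demuxer;

static void register_example_formats(void)
{
    av_register_input_format(&my_demuxer);
}
#endif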
59
60int match_ext(const char *filename, const char *extensions)
61{
62 const char *ext, *p;
63 char ext1[32], *q;
64
65 if(!filename)
66 return 0;
67
68 ext = strrchr(filename, '.');
69 if (ext) {
70 ext++;
71 p = extensions;
72 for(;;) {
73 q = ext1;
74 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
75 *q++ = *p++;
76 *q = '\0';
77 if (!strcasecmp(ext1, ext))
78 return 1;
79 if (*p == '\0')
80 break;
81 p++;
82 }
83 }
84 return 0;
85}
86
87AVOutputFormat *guess_format(const char *short_name, const char *filename,
88 const char *mime_type)
89{
90 AVOutputFormat *fmt, *fmt_found;
91 int score_max, score;
92
93 /* specific test for image sequences */
94#ifdef CONFIG_IMAGE2_MUXER
95 if (!short_name && filename &&
96 av_filename_number_test(filename) &&
97 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
98 return guess_format("image2", NULL, NULL);
99 }
100#endif
101 /* find the proper file type */
102 fmt_found = NULL;
103 score_max = 0;
104 fmt = first_oformat;
105 while (fmt != NULL) {
106 score = 0;
107 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
108 score += 100;
109 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
110 score += 10;
111 if (filename && fmt->extensions &&
112 match_ext(filename, fmt->extensions)) {
113 score += 5;
114 }
115 if (score > score_max) {
116 score_max = score;
117 fmt_found = fmt;
118 }
119 fmt = fmt->next;
120 }
121 return fmt_found;
122}
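/* Sketch of typical muxer selection by an application: let the filename drive the
 * choice and fall back to an explicit short name; the names are illustrative. */
#if 0
static AVOutputFormat *pick_muxer_example(const char *filename)
{
    AVOutputFormat *ofmt = guess_format(NULL, filename, NULL); /* e.g. "out.avi" matches by extension */

    if (!ofmt)
        ofmt = guess_format("avi", NULL, NULL);                /* force the muxer by short name */
    return ofmt;
}
#endif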
123
124AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
125 const char *mime_type)
126{
127 AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
128
129 if (fmt) {
130 AVOutputFormat *stream_fmt;
131 char stream_format_name[64];
132
133 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
134 stream_fmt = guess_format(stream_format_name, NULL, NULL);
135
136 if (stream_fmt)
137 fmt = stream_fmt;
138 }
139
140 return fmt;
141}
142
143/**
144 * Guesses the codec id based upon muxer and filename.
145 */
146enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
147 const char *filename, const char *mime_type, enum CodecType type){
148 if(type == CODEC_TYPE_VIDEO){
149 enum CodecID codec_id= CODEC_ID_NONE;
150
151#ifdef CONFIG_IMAGE2_MUXER
152 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
153 codec_id= av_guess_image2_codec(filename);
154 }
155#endif
156 if(codec_id == CODEC_ID_NONE)
157 codec_id= fmt->video_codec;
158 return codec_id;
159 }else if(type == CODEC_TYPE_AUDIO)
160 return fmt->audio_codec;
161 else
162 return CODEC_ID_NONE;
163}
164
165/**
166 * Finds AVInputFormat based on the input format's short name.
167 */
168AVInputFormat *av_find_input_format(const char *short_name)
169{
170 AVInputFormat *fmt;
171 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
172 if (!strcmp(fmt->name, short_name))
173 return fmt;
174 }
175 return NULL;
176}
177
178/* memory handling */
179
180/**
181 * Default packet destructor.
182 */
183void av_destruct_packet(AVPacket *pkt)
184{
185 av_free(pkt->data);
186 pkt->data = NULL; pkt->size = 0;
187}
188
189/**
190 * Allocate the payload of a packet and initialize its fields to default values.
191 *
192 * @param pkt packet
193 * @param size wanted payload size
194 * @return 0 if OK. AVERROR_xxx otherwise.
195 */
196int av_new_packet(AVPacket *pkt, int size)
197{
198 uint8_t *data;
199 if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
200 return AVERROR_NOMEM;
201 data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
202 if (!data)
203 return AVERROR_NOMEM;
204 memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
205
206 av_init_packet(pkt);
207 pkt->data = data;
208 pkt->size = size;
209 pkt->destruct = av_destruct_packet;
210 return 0;
211}
212
213/**
214 * Allocate and read the payload of a packet and initialize its fields to default values.
215 *
216 * @param pkt packet
217 * @param size wanted payload size
218 * @return >0 (read size) if OK. AVERROR_xxx otherwise.
219 */
220int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
221{
222 int ret= av_new_packet(pkt, size);
223
224 if(ret<0)
225 return ret;
226
227 pkt->pos= url_ftell(s);
228
229 ret= get_buffer(s, pkt->data, size);
230 if(ret<=0)
231 av_free_packet(pkt);
232 else
233 pkt->size= ret;
234
235 return ret;
236}
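/* Sketch of how a demuxer's read_packet() callback could use av_get_packet() to
 * read a fixed-size chunk; RAW_PACKET_SIZE is an illustrative constant. */
#if 0
static int example_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret = av_get_packet(&s->pb, pkt, RAW_PACKET_SIZE);

    if (ret <= 0)
        return AVERROR_IO;
    pkt->stream_index = 0;
    return 0;
}
#endif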
237
238/* This is a hack - the packet memory allocation stuff is broken. The
239 packet data is duplicated here if the packet does not own its buffer */
240int av_dup_packet(AVPacket *pkt)
241{
242 if (pkt->destruct != av_destruct_packet) {
243 uint8_t *data;
244 /* we duplicate the packet and don't forget to put the padding
245 again */
246 if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
247 return AVERROR_NOMEM;
248 data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
249 if (!data) {
250 return AVERROR_NOMEM;
251 }
252 memcpy(data, pkt->data, pkt->size);
253 memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
254 pkt->data = data;
255 pkt->destruct = av_destruct_packet;
256 }
257 return 0;
258}
259
260/**
261 * Check whether the filename looks like a numbered sequence string (e.g. contains a %d pattern).
262 *
263 * @param filename possible numbered sequence string
264 * @return 1 if a valid numbered sequence string, 0 otherwise.
265 */
266int av_filename_number_test(const char *filename)
267{
268 char buf[1024];
269 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
270}
271
272/**
273 * Guess file format.
274 */
275AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
276{
277 AVInputFormat *fmt1, *fmt;
278 int score, score_max, i=0;
279
280 fmt = NULL;
281 score_max = 0;
282 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
283 if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
284 continue;
285 i++;
286 score = 0;
287 if (fmt1->read_probe) {
288 score = fmt1->read_probe(pd);
289 } else if (fmt1->extensions) {
290 if (match_ext(pd->filename, fmt1->extensions)) {
291 score = 50;
292 }
293 }
294 if (score > score_max) {
295 score_max = score;
296 fmt = fmt1;
297 }
298 }
299 return fmt;
300}
301
302/************************************************************/
303/* input media file */
304
305/**
306 * Open a media file from an IO stream. 'fmt' must be specified.
307 */
308static const char* format_to_name(void* ptr)
309{
310 AVFormatContext* fc = (AVFormatContext*) ptr;
311 if(fc->iformat) return fc->iformat->name;
312 else if(fc->oformat) return fc->oformat->name;
313 else return "NULL";
314}
315
316#define OFFSET(x) offsetof(AVFormatContext,x)
317#define DEFAULT 0 //should be NAN but it doesn't work as it is not a constant in glibc as required by ANSI/ISO C
318//these names are too long to be readable
319#define E AV_OPT_FLAG_ENCODING_PARAM
320#define D AV_OPT_FLAG_DECODING_PARAM
321
322static const AVOption options[]={
323{"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
324{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
325{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
326{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
327{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
328{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
329{"track", " set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
330{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
331{NULL},
332};
333
334#undef E
335#undef D
336#undef DEFAULT
337
338static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
339
340#if LIBAVFORMAT_VERSION_INT >= ((51<<16)+(0<<8)+0)
341static
342#endif
343void avformat_get_context_defaults(AVFormatContext *s){
344 memset(s, 0, sizeof(AVFormatContext));
345
346 s->av_class = &av_format_context_class;
347
348 av_opt_set_defaults(s);
349}
350
351AVFormatContext *av_alloc_format_context(void)
352{
353 AVFormatContext *ic;
354 ic = av_malloc(sizeof(AVFormatContext));
355 if (!ic) return ic;
356 avformat_get_context_defaults(ic);
357 ic->av_class = &av_format_context_class;
358 return ic;
359}
360
361/**
362 * Allocates all the structures needed to read an input stream.
363 * This does not open the needed codecs for decoding the stream[s].
364 */
365int av_open_input_stream(AVFormatContext **ic_ptr,
366 ByteIOContext *pb, const char *filename,
367 AVInputFormat *fmt, AVFormatParameters *ap)
368{
369 int err;
370 AVFormatContext *ic;
371 AVFormatParameters default_ap;
372
373 if(!ap){
374 ap=&default_ap;
375 memset(ap, 0, sizeof(default_ap));
376 }
377
378 if(!ap->prealloced_context)
379 ic = av_alloc_format_context();
380 else
381 ic = *ic_ptr;
382 if (!ic) {
383 err = AVERROR_NOMEM;
384 goto fail;
385 }
386 ic->iformat = fmt;
387 if (pb)
388 ic->pb = *pb;
389 ic->duration = AV_NOPTS_VALUE;
390 ic->start_time = AV_NOPTS_VALUE;
391 pstrcpy(ic->filename, sizeof(ic->filename), filename);
392
393 /* allocate private data */
394 if (fmt->priv_data_size > 0) {
395 ic->priv_data = av_mallocz(fmt->priv_data_size);
396 if (!ic->priv_data) {
397 err = AVERROR_NOMEM;
398 goto fail;
399 }
400 } else {
401 ic->priv_data = NULL;
402 }
403
404 err = ic->iformat->read_header(ic, ap);
405 if (err < 0)
406 goto fail;
407
408 if (pb)
409 ic->data_offset = url_ftell(&ic->pb);
410
411 *ic_ptr = ic;
412 return 0;
413 fail:
414 if (ic) {
415 av_freep(&ic->priv_data);
416 }
417 av_free(ic);
418 *ic_ptr = NULL;
419 return err;
420}
421
422/** Size of probe buffer, for guessing file type from file contents. */
423#define PROBE_BUF_MIN 2048
424#define PROBE_BUF_MAX (1<<20)
425
426/**
427 * Open a media file as input. The codecs are not opened. Only the file
428 * header (if present) is read.
429 *
430 * @param ic_ptr the opened media file handle is put here
431 * @param filename filename to open.
432 * @param fmt if non NULL, force the file format to use
433 * @param buf_size optional buffer size (zero if default is OK)
434 * @param ap additional parameters needed when opening the file (NULL if default)
435 * @return 0 if OK. AVERROR_xxx otherwise.
436 */
437int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
438 AVInputFormat *fmt,
439 int buf_size,
440 AVFormatParameters *ap)
441{
442 int err, must_open_file, file_opened, probe_size;
443 AVProbeData probe_data, *pd = &probe_data;
444 ByteIOContext pb1, *pb = &pb1;
445
446 file_opened = 0;
447 pd->filename = "";
448 if (filename)
449 pd->filename = filename;
450 pd->buf = NULL;
451 pd->buf_size = 0;
452
453 if (!fmt) {
454 /* guess format if no file can be opened */
455 fmt = av_probe_input_format(pd, 0);
456 }
457
458 /* do not open file if the format does not need it. XXX: specific
459 hack needed to handle RTSP/TCP */
460 must_open_file = 1;
461 if (fmt && (fmt->flags & AVFMT_NOFILE)) {
462 must_open_file = 0;
463 pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
464 }
465
466 if (!fmt || must_open_file) {
467 /* open the file: either the format is still unknown (needs probing) or it requires a real file */
468 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
469 err = AVERROR_IO;
470 goto fail;
471 }
472 file_opened = 1;
473 if (buf_size > 0) {
474 url_setbufsize(pb, buf_size);
475 }
476
477 for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
478 /* read probe data */
479 pd->buf= av_realloc(pd->buf, probe_size);
480 pd->buf_size = get_buffer(pb, pd->buf, probe_size);
481 if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
482 url_fclose(pb);
483 if (url_fopen(pb, filename, URL_RDONLY) < 0) {
484 file_opened = 0;
485 err = AVERROR_IO;
486 goto fail;
487 }
488 }
489 /* guess file format */
490 fmt = av_probe_input_format(pd, 1);
491 }
492 av_freep(&pd->buf);
493 }
494
495 /* if still no format found, error */
496 if (!fmt) {
497 err = AVERROR_NOFMT;
498 goto fail;
499 }
500
501 /* XXX: suppress this hack for redirectors */
502#ifdef CONFIG_NETWORK
503 if (fmt == &redir_demuxer) {
504 err = redir_open(ic_ptr, pb);
505 url_fclose(pb);
506 return err;
507 }
508#endif
509
510 /* check filename in case an image number is expected */
511 if (fmt->flags & AVFMT_NEEDNUMBER) {
512 if (!av_filename_number_test(filename)) {
513 err = AVERROR_NUMEXPECTED;
514 goto fail;
515 }
516 }
517 err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
518 if (err)
519 goto fail;
520 return 0;
521 fail:
522 av_freep(&pd->buf);
523 if (file_opened)
524 url_fclose(pb);
525 *ic_ptr = NULL;
526 return err;
527
528}
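/* Minimal sketch of the open/close life cycle as seen from an application, with
 * error handling reduced to early returns: */
#if 0
static int open_example(const char *filename)
{
    AVFormatContext *ic;

    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;                        /* could not open or probe the file */
    if (av_find_stream_info(ic) < 0) {    /* fill in codec parameters, durations, ... */
        av_close_input_file(ic);
        return -1;
    }
    dump_format(ic, 0, filename, 0);      /* print what was found */
    /* ... av_read_frame() loop goes here ... */
    av_close_input_file(ic);
    return 0;
}
#endif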
529
530/*******************************************************/
531
532/**
533 * Read a transport packet from a media file.
534 *
535 * This function is obsolete and should never be used.
536 * Use av_read_frame() instead.
537 *
538 * @param s media file handle
539 * @param pkt is filled
540 * @return 0 if OK. AVERROR_xxx if error.
541 */
542int av_read_packet(AVFormatContext *s, AVPacket *pkt)
543{
544 return s->iformat->read_packet(s, pkt);
545}
546
547/**********************************************************/
548
549/**
550 * Get the number of samples of an audio frame. Return -1 on error.
551 */
552static int get_audio_frame_size(AVCodecContext *enc, int size)
553{
554 int frame_size;
555
556 if (enc->frame_size <= 1) {
557 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
558
559 if (bits_per_sample) {
560 if (enc->channels == 0)
561 return -1;
562 frame_size = (size << 3) / (bits_per_sample * enc->channels);
563 } else {
564 /* used for example by ADPCM codecs */
565 if (enc->bit_rate == 0)
566 return -1;
567 frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
568 }
569 } else {
570 frame_size = enc->frame_size;
571 }
572 return frame_size;
573}
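/* Worked example: for 16 bit stereo PCM (bits_per_sample=16, channels=2) a 4096 byte
 * packet gives (4096<<3)/(16*2) = 1024 samples; for an ADPCM-style codec with
 * bit_rate=128000 and sample_rate=44100 the same packet gives
 * (4096*8*44100)/128000 = 11289 samples. */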
574
575
576/**
577 * Compute the frame duration as a fraction of a second (*pnum / *pden); both are set to 0 if it is not available.
578 */
579static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
580 AVCodecParserContext *pc, AVPacket *pkt)
581{
582 int frame_size;
583
584 *pnum = 0;
585 *pden = 0;
586 switch(st->codec->codec_type) {
587 case CODEC_TYPE_VIDEO:
588 if(st->time_base.num*1000LL > st->time_base.den){
589 *pnum = st->time_base.num;
590 *pden = st->time_base.den;
591 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
592 *pnum = st->codec->time_base.num;
593 *pden = st->codec->time_base.den;
594 if (pc && pc->repeat_pict) {
595 *pden *= 2;
596 *pnum = (*pnum) * (2 + pc->repeat_pict);
597 }
598 }
599 break;
600 case CODEC_TYPE_AUDIO:
601 frame_size = get_audio_frame_size(st->codec, pkt->size);
602 if (frame_size < 0)
603 break;
604 *pnum = frame_size;
605 *pden = st->codec->sample_rate;
606 break;
607 default:
608 break;
609 }
610}
611
612static int is_intra_only(AVCodecContext *enc){
613 if(enc->codec_type == CODEC_TYPE_AUDIO){
614 return 1;
615 }else if(enc->codec_type == CODEC_TYPE_VIDEO){
616 switch(enc->codec_id){
617 case CODEC_ID_MJPEG:
618 case CODEC_ID_MJPEGB:
619 case CODEC_ID_LJPEG:
620 case CODEC_ID_RAWVIDEO:
621 case CODEC_ID_DVVIDEO:
622 case CODEC_ID_HUFFYUV:
623 case CODEC_ID_FFVHUFF:
624 case CODEC_ID_ASV1:
625 case CODEC_ID_ASV2:
626 case CODEC_ID_VCR1:
627 return 1;
628 default: break;
629 }
630 }
631 return 0;
632}
633
634static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
635 int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
636 int64_t delta= last_ts - mask/2;
637 return ((lsb - delta)&mask) + delta;
638}
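/* Worked example of the wrap handling with 33 bit MPEG timestamps: if the last full
 * timestamp was last_ts=0x1FFFFFF00 and a packet carries the wrapped 33 bit value
 * lsb=0x10, lsb2full() returns 0x200000010, i.e. the value closest to last_ts rather
 * than the small pre-wrap value 0x10. */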
639
640static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
641 AVCodecParserContext *pc, AVPacket *pkt)
642{
643 int num, den, presentation_delayed;
644 /* handle wrapping */
645 if(st->cur_dts != AV_NOPTS_VALUE){
646 if(pkt->pts != AV_NOPTS_VALUE)
647 pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
648 if(pkt->dts != AV_NOPTS_VALUE)
649 pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
650 }
651
652 if (pkt->duration == 0) {
653 compute_frame_duration(&num, &den, st, pc, pkt);
654 if (den && num) {
655 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
656 }
657 }
658
659 if(is_intra_only(st->codec))
660 pkt->flags |= PKT_FLAG_KEY;
661
662 /* do we have a video B-frame? */
663 presentation_delayed = 0;
664 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
665 /* XXX: need has_b_frame, but cannot get it if the codec is
666 not initialized */
667 if (( st->codec->codec_id == CODEC_ID_H264
668 || st->codec->has_b_frames) &&
669 pc && pc->pict_type != FF_B_TYPE)
670 presentation_delayed = 1;
671 /* this may be redundant, but it shouldn't hurt */
672 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
673 presentation_delayed = 1;
674 }
675
676 if(st->cur_dts == AV_NOPTS_VALUE){
677 if(presentation_delayed) st->cur_dts = -pkt->duration;
678 else st->cur_dts = 0;
679 }
680
681// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
682 /* interpolate PTS and DTS if they are not present */
683 if (presentation_delayed) {
684 /* DTS = decompression time stamp */
685 /* PTS = presentation time stamp */
686 if (pkt->dts == AV_NOPTS_VALUE) {
687 /* if we know the last pts, use it */
688 if(st->last_IP_pts != AV_NOPTS_VALUE)
689 st->cur_dts = pkt->dts = st->last_IP_pts;
690 else
691 pkt->dts = st->cur_dts;
692 } else {
693 st->cur_dts = pkt->dts;
694 }
695 /* this is tricky: the dts must be incremented by the duration
696 of the frame we are displaying, i.e. the last I or P frame */
697 if (st->last_IP_duration == 0)
698 st->cur_dts += pkt->duration;
699 else
700 st->cur_dts += st->last_IP_duration;
701 st->last_IP_duration = pkt->duration;
702 st->last_IP_pts= pkt->pts;
703 /* cannot compute PTS if not present (we can compute it only
704 by knowing the future) */
705 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
706 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
707 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
708 int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
709 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
710 pkt->pts += pkt->duration;
711// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
712 }
713 }
714
715 /* presentation is not delayed: PTS and DTS are the same */
716 if (pkt->pts == AV_NOPTS_VALUE) {
717 if (pkt->dts == AV_NOPTS_VALUE) {
718 pkt->pts = st->cur_dts;
719 pkt->dts = st->cur_dts;
720 }
721 else {
722 st->cur_dts = pkt->dts;
723 pkt->pts = pkt->dts;
724 }
725 } else {
726 st->cur_dts = pkt->pts;
727 pkt->dts = pkt->pts;
728 }
729 st->cur_dts += pkt->duration;
730 }
731// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
732
733 /* update flags */
734 if (pc) {
735 pkt->flags = 0;
736 /* key frame computation */
737 switch(st->codec->codec_type) {
738 case CODEC_TYPE_VIDEO:
739 if (pc->pict_type == FF_I_TYPE)
740 pkt->flags |= PKT_FLAG_KEY;
741 break;
742 case CODEC_TYPE_AUDIO:
743 pkt->flags |= PKT_FLAG_KEY;
744 break;
745 default:
746 break;
747 }
748 }
749}
750
751void av_destruct_packet_nofree(AVPacket *pkt)
752{
753 pkt->data = NULL; pkt->size = 0;
754}
755
756static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
757{
758 AVStream *st;
759 int len, ret, i;
760
761 for(;;) {
762 /* select current input stream component */
763 st = s->cur_st;
764 if (st) {
765 if (!st->need_parsing || !st->parser) {
766 /* no parsing needed: we just output the packet as is */
767 /* raw data support */
768 *pkt = s->cur_pkt;
769 compute_pkt_fields(s, st, NULL, pkt);
770 s->cur_st = NULL;
771 break;
772 } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
773 len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
774 s->cur_ptr, s->cur_len,
775 s->cur_pkt.pts, s->cur_pkt.dts);
776 s->cur_pkt.pts = AV_NOPTS_VALUE;
777 s->cur_pkt.dts = AV_NOPTS_VALUE;
778 /* increment read pointer */
779 s->cur_ptr += len;
780 s->cur_len -= len;
781
782 /* return packet if any */
783 if (pkt->size) {
784 got_packet:
785 pkt->duration = 0;
786 pkt->stream_index = st->index;
787 pkt->pts = st->parser->pts;
788 pkt->dts = st->parser->dts;
789 pkt->destruct = av_destruct_packet_nofree;
790 compute_pkt_fields(s, st, st->parser, pkt);
791 break;
792 }
793 } else {
794 /* free packet */
795 av_free_packet(&s->cur_pkt);
796 s->cur_st = NULL;
797 }
798 } else {
799 /* read next packet */
800 ret = av_read_packet(s, &s->cur_pkt);
801 if (ret < 0) {
802 if (ret == -EAGAIN)
803 return ret;
804 /* return the last frames, if any */
805 for(i = 0; i < s->nb_streams; i++) {
806 st = s->streams[i];
807 if (st->parser && st->need_parsing) {
808 av_parser_parse(st->parser, st->codec,
809 &pkt->data, &pkt->size,
810 NULL, 0,
811 AV_NOPTS_VALUE, AV_NOPTS_VALUE);
812 if (pkt->size)
813 goto got_packet;
814 }
815 }
816 /* no more packets: really terminates parsing */
817 return ret;
818 }
819
820 st = s->streams[s->cur_pkt.stream_index];
821 if(st->codec->debug & FF_DEBUG_PTS)
822 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
823 s->cur_pkt.stream_index,
824 s->cur_pkt.pts,
825 s->cur_pkt.dts,
826 s->cur_pkt.size);
827
828 s->cur_st = st;
829 s->cur_ptr = s->cur_pkt.data;
830 s->cur_len = s->cur_pkt.size;
831 if (st->need_parsing && !st->parser) {
832 st->parser = av_parser_init(st->codec->codec_id);
833 if (!st->parser) {
834 /* no parser available: just output the raw packets */
835 st->need_parsing = 0;
836 }else if(st->need_parsing == 2){
837 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
838 }
839 }
840 }
841 }
842 if(st->codec->debug & FF_DEBUG_PTS)
843 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
844 pkt->stream_index,
845 pkt->pts,
846 pkt->dts,
847 pkt->size);
848
849 return 0;
850}
851
852/**
853 * Return the next frame of a stream.
854 *
855 * The returned packet is valid
856 * until the next av_read_frame() or until av_close_input_file() and
857 * must be freed with av_free_packet. For video, the packet contains
858 * exactly one frame. For audio, it contains an integer number of
859 * frames if each frame has a known fixed size (e.g. PCM or ADPCM
860 * data). If the audio frames have a variable size (e.g. MPEG audio),
861 * then it contains one frame.
862 *
863 * pkt->pts, pkt->dts and pkt->duration are always set to correct
864 * values in AV_TIME_BASE units (and guessed if the format cannot
865 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
866 * has B frames, so it is better to rely on pkt->dts if you do not
867 * decompress the payload.
868 *
869 * @return 0 if OK, < 0 if error or end of file.
870 */
871int av_read_frame(AVFormatContext *s, AVPacket *pkt)
872{
873 AVPacketList *pktl;
874 int eof=0;
875 const int genpts= s->flags & AVFMT_FLAG_GENPTS;
876
877 for(;;){
878 pktl = s->packet_buffer;
879 if (pktl) {
880 AVPacket *next_pkt= &pktl->pkt;
881
882 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
883 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
884 if( pktl->pkt.stream_index == next_pkt->stream_index
885 && next_pkt->dts < pktl->pkt.dts
886 && pktl->pkt.pts != pktl->pkt.dts //not b frame
887 /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
888 next_pkt->pts= pktl->pkt.dts;
889 }
890 pktl= pktl->next;
891 }
892 pktl = s->packet_buffer;
893 }
894
895 if( next_pkt->pts != AV_NOPTS_VALUE
896 || next_pkt->dts == AV_NOPTS_VALUE
897 || !genpts || eof){
898 /* read packet from packet buffer, if there is data */
899 *pkt = *next_pkt;
900 s->packet_buffer = pktl->next;
901 av_free(pktl);
902 return 0;
903 }
904 }
905 if(genpts){
906 AVPacketList **plast_pktl= &s->packet_buffer;
907 int ret= av_read_frame_internal(s, pkt);
908 if(ret<0){
909 if(pktl && ret != -EAGAIN){
910 eof=1;
911 continue;
912 }else
913 return ret;
914 }
915
916 /* duplicate the packet */
917 if (av_dup_packet(pkt) < 0)
918 return AVERROR_NOMEM;
919
920 while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
921
922 pktl = av_mallocz(sizeof(AVPacketList));
923 if (!pktl)
924 return AVERROR_NOMEM;
925
926 /* add the packet in the buffered packet list */
927 *plast_pktl = pktl;
928 pktl->pkt= *pkt;
929 }else{
930 assert(!s->packet_buffer);
931 return av_read_frame_internal(s, pkt);
932 }
933 }
934}
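/* Sketch of the canonical demuxing loop built on av_read_frame(), assuming "ic"
 * was opened with av_open_input_file() and "video_index" selects the stream: */
#if 0
static void read_loop_example(AVFormatContext *ic, int video_index)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        if (pkt.stream_index == video_index) {
            /* hand pkt.data / pkt.size to the video decoder here */
        }
        av_free_packet(&pkt);   /* always release the packet, even when it is skipped */
    }
}
#endif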
935
936/* XXX: suppress the packet queue */
937static void flush_packet_queue(AVFormatContext *s)
938{
939 AVPacketList *pktl;
940
941 for(;;) {
942 pktl = s->packet_buffer;
943 if (!pktl)
944 break;
945 s->packet_buffer = pktl->next;
946 av_free_packet(&pktl->pkt);
947 av_free(pktl);
948 }
949}
950
951/*******************************************************/
952/* seek support */
953
954int av_find_default_stream_index(AVFormatContext *s)
955{
956 int i;
957 AVStream *st;
958
959 if (s->nb_streams <= 0)
960 return -1;
961 for(i = 0; i < s->nb_streams; i++) {
962 st = s->streams[i];
963 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
964 return i;
965 }
966 }
967 return 0;
968}
969
970/**
971 * Flush the frame reader.
972 */
973static void av_read_frame_flush(AVFormatContext *s)
974{
975 AVStream *st;
976 int i;
977
978 flush_packet_queue(s);
979
980 /* free previous packet */
981 if (s->cur_st) {
982 if (s->cur_st->parser)
983 av_free_packet(&s->cur_pkt);
984 s->cur_st = NULL;
985 }
986 /* fail safe */
987 s->cur_ptr = NULL;
988 s->cur_len = 0;
989
990 /* for each stream, reset read state */
991 for(i = 0; i < s->nb_streams; i++) {
992 st = s->streams[i];
993
994 if (st->parser) {
995 av_parser_close(st->parser);
996 st->parser = NULL;
997 }
998 st->last_IP_pts = AV_NOPTS_VALUE;
999 st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
1000 }
1001}
1002
1003/**
1004 * Updates cur_dts of all streams based on given timestamp and AVStream.
1005 *
1006 * Stream ref_st is left unchanged; the others get cur_dts set in their native time base.
1007 * Only needed for timestamp wrapping or if (dts not set and pts!=dts).
1008 * @param timestamp new dts expressed in time_base of param ref_st
1009 * @param ref_st reference stream giving time_base of param timestamp
1010 */
1011void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1012 int i;
1013
1014 for(i = 0; i < s->nb_streams; i++) {
1015 AVStream *st = s->streams[i];
1016
1017 st->cur_dts = av_rescale(timestamp,
1018 st->time_base.den * (int64_t)ref_st->time_base.num,
1019 st->time_base.num * (int64_t)ref_st->time_base.den);
1020 }
1021}
1022
1023/**
1024 * Add an index entry into a sorted list, updating it if it is already there.
1025 *
1026 * @param timestamp timestamp in the timebase of the given stream
1027 */
1028int av_add_index_entry(AVStream *st,
1029 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1030{
1031 AVIndexEntry *entries, *ie;
1032 int index;
1033
1034 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1035 return -1;
1036
1037 entries = av_fast_realloc(st->index_entries,
1038 &st->index_entries_allocated_size,
1039 (st->nb_index_entries + 1) *
1040 sizeof(AVIndexEntry));
1041 if(!entries)
1042 return -1;
1043
1044 st->index_entries= entries;
1045
1046 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1047
1048 if(index<0){
1049 index= st->nb_index_entries++;
1050 ie= &entries[index];
1051 assert(index==0 || ie[-1].timestamp < timestamp);
1052 }else{
1053 ie= &entries[index];
1054 if(ie->timestamp != timestamp){
1055 if(ie->timestamp <= timestamp)
1056 return -1;
1057 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1058 st->nb_index_entries++;
1059 }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
1060 distance= ie->min_distance;
1061 }
1062
1063 ie->pos = pos;
1064 ie->timestamp = timestamp;
1065 ie->min_distance= distance;
1066 ie->size= size;
1067 ie->flags = flags;
1068
1069 return index;
1070}
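/* Sketch of a demuxer recording a keyframe it has just parsed, so the generic and
 * binary seek code below can use the index later; "pos" is the file position where
 * the frame starts: */
#if 0
av_add_index_entry(st, pos, pkt->dts, pkt->size, 0 /* min_distance */, AVINDEX_KEYFRAME);
#endif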
1071
1072/**
1073 * build an index for raw streams using a parser.
1074 */
1075static void av_build_index_raw(AVFormatContext *s)
1076{
1077 AVPacket pkt1, *pkt = &pkt1;
1078 int ret;
1079 AVStream *st;
1080
1081 st = s->streams[0];
1082 av_read_frame_flush(s);
1083 url_fseek(&s->pb, s->data_offset, SEEK_SET);
1084
1085 for(;;) {
1086 ret = av_read_frame(s, pkt);
1087 if (ret < 0)
1088 break;
1089 if (pkt->stream_index == 0 && st->parser &&
1090 (pkt->flags & PKT_FLAG_KEY)) {
1091 av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
1092 0, 0, AVINDEX_KEYFRAME);
1093 }
1094 av_free_packet(pkt);
1095 }
1096}
1097
1098/**
1099 * Returns TRUE if we deal with a raw stream.
1100 *
1101 * Raw codec data and parsing needed.
1102 */
1103static int is_raw_stream(AVFormatContext *s)
1104{
1105 AVStream *st;
1106
1107 if (s->nb_streams != 1)
1108 return 0;
1109 st = s->streams[0];
1110 if (!st->need_parsing)
1111 return 0;
1112 return 1;
1113}
1114
1115/**
1116 * Gets the index for a specific timestamp.
1117 * @param flags if AVSEEK_FLAG_BACKWARD is set then the returned index will correspond
1118 * to the timestamp which is <= the requested one; otherwise it will
1119 * be >= the requested one
1120 * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
1121 * @return < 0 if no such timestamp could be found
1122 */
1123int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1124 int flags)
1125{
1126 AVIndexEntry *entries= st->index_entries;
1127 int nb_entries= st->nb_index_entries;
1128 int a, b, m;
1129 int64_t timestamp;
1130
1131 a = - 1;
1132 b = nb_entries;
1133
1134 while (b - a > 1) {
1135 m = (a + b) >> 1;
1136 timestamp = entries[m].timestamp;
1137 if(timestamp >= wanted_timestamp)
1138 b = m;
1139 if(timestamp <= wanted_timestamp)
1140 a = m;
1141 }
1142 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1143
1144 if(!(flags & AVSEEK_FLAG_ANY)){
1145 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1146 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1147 }
1148 }
1149
1150 if(m == nb_entries)
1151 return -1;
1152 return m;
1153}
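/* Sketch of resolving a timestamp to a file position through the index, assuming
 * the stream already has index entries: */
#if 0
static int seek_via_index_example(AVFormatContext *s, AVStream *st, int64_t wanted_ts)
{
    int idx = av_index_search_timestamp(st, wanted_ts, AVSEEK_FLAG_BACKWARD);

    if (idx < 0)
        return -1;
    url_fseek(&s->pb, st->index_entries[idx].pos, SEEK_SET); /* keyframe at or before wanted_ts */
    return 0;
}
#endif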
1154
1155#define DEBUG_SEEK
1156
1157/**
1158 * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
1159 * This isn't supposed to be called directly by a user application, but by demuxers.
1160 * @param target_ts target timestamp in the time base of the given stream
1161 * @param stream_index stream number
1162 */
1163int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1164 AVInputFormat *avif= s->iformat;
1165 int64_t pos_min, pos_max, pos, pos_limit;
1166 int64_t ts_min, ts_max, ts;
1167 int64_t start_pos, filesize;
1168 int index, no_change;
1169 AVStream *st;
1170
1171 if (stream_index < 0)
1172 return -1;
1173
1174#ifdef DEBUG_SEEK
1175 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1176#endif
1177
1178 ts_max=
1179 ts_min= AV_NOPTS_VALUE;
1180 pos_limit= -1; //gcc falsely says it may be uninitialized
1181
1182 st= s->streams[stream_index];
1183 if(st->index_entries){
1184 AVIndexEntry *e;
1185
1186 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
1187 index= FFMAX(index, 0);
1188 e= &st->index_entries[index];
1189
1190 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1191 pos_min= e->pos;
1192 ts_min= e->timestamp;
1193#ifdef DEBUG_SEEK
1194 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
1195 pos_min,ts_min);
1196#endif
1197 }else{
1198 assert(index==0);
1199 }
1200
1201 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1202 assert(index < st->nb_index_entries);
1203 if(index >= 0){
1204 e= &st->index_entries[index];
1205 assert(e->timestamp >= target_ts);
1206 pos_max= e->pos;
1207 ts_max= e->timestamp;
1208 pos_limit= pos_max - e->min_distance;
1209#ifdef DEBUG_SEEK
1210 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1211 pos_max,pos_limit, ts_max);
1212#endif
1213 }
1214 }
1215
1216 if(ts_min == AV_NOPTS_VALUE){
1217 pos_min = s->data_offset;
1218 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1219 if (ts_min == AV_NOPTS_VALUE)
1220 return -1;
1221 }
1222
1223 if(ts_max == AV_NOPTS_VALUE){
1224 int step= 1024;
1225 filesize = url_fsize(&s->pb);
1226 pos_max = filesize - 1;
1227 do{
1228 pos_max -= step;
1229 ts_max = avif->read_timestamp(s, stream_index, &pos_max, pos_max + step);
1230 step += step;
1231 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1232 if (ts_max == AV_NOPTS_VALUE)
1233 return -1;
1234
1235 for(;;){
1236 int64_t tmp_pos= pos_max + 1;
1237 int64_t tmp_ts= avif->read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1238 if(tmp_ts == AV_NOPTS_VALUE)
1239 break;
1240 ts_max= tmp_ts;
1241 pos_max= tmp_pos;
1242 if(tmp_pos >= filesize)
1243 break;
1244 }
1245 pos_limit= pos_max;
1246 }
1247
1248 if(ts_min > ts_max){
1249 return -1;
1250 }else if(ts_min == ts_max){
1251 pos_limit= pos_min;
1252 }
1253
1254 no_change=0;
1255 while (pos_min < pos_limit) {
1256#ifdef DEBUG_SEEK
1257 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1258 pos_min, pos_max,
1259 ts_min, ts_max);
1260#endif
1261 assert(pos_limit <= pos_max);
1262
1263 if(no_change==0){
1264 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1265 // interpolate position (better than dichotomy)
1266 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1267 + pos_min - approximate_keyframe_distance;
1268 }else if(no_change==1){
1269 // bisection, if interpolation failed to change min or max pos last time
1270 pos = (pos_min + pos_limit)>>1;
1271 }else{
1272 // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
1273 pos=pos_min;
1274 }
1275 if(pos <= pos_min)
1276 pos= pos_min + 1;
1277 else if(pos > pos_limit)
1278 pos= pos_limit;
1279 start_pos= pos;
1280
1281 ts = avif->read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1282 if(pos == pos_max)
1283 no_change++;
1284 else
1285 no_change=0;
1286#ifdef DEBUG_SEEK
1287av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
1288#endif
1289 assert(ts != AV_NOPTS_VALUE);
1290 if (target_ts <= ts) {
1291 pos_limit = start_pos - 1;
1292 pos_max = pos;
1293 ts_max = ts;
1294 }
1295 if (target_ts >= ts) {
1296 pos_min = pos;
1297 ts_min = ts;
1298 }
1299 }
1300
1301 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1302 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1303#ifdef DEBUG_SEEK
1304 pos_min = pos;
1305 ts_min = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1306 pos_min++;
1307 ts_max = avif->read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1308 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1309 pos, ts_min, target_ts, ts_max);
1310#endif
1311 /* do the seek */
1312 url_fseek(&s->pb, pos, SEEK_SET);
1313
1314 av_update_cur_dts(s, st, ts);
1315
1316 return 0;
1317}
1318
1319static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1320 int64_t pos_min, pos_max;
1321#if 0
1322 AVStream *st;
1323
1324 if (stream_index < 0)
1325 return -1;
1326
1327 st= s->streams[stream_index];
1328#endif
1329
1330 pos_min = s->data_offset;
1331 pos_max = url_fsize(&s->pb) - 1;
1332
1333 if (pos < pos_min) pos= pos_min;
1334 else if(pos > pos_max) pos= pos_max;
1335
1336 url_fseek(&s->pb, pos, SEEK_SET);
1337
1338#if 0
1339 av_update_cur_dts(s, st, ts);
1340#endif
1341 return 0;
1342}
1343
1344static int av_seek_frame_generic(AVFormatContext *s,
1345 int stream_index, int64_t timestamp, int flags)
1346{
1347 int index;
1348 AVStream *st;
1349 AVIndexEntry *ie;
1350
1351 if (!s->index_built) {
1352 if (is_raw_stream(s)) {
1353 av_build_index_raw(s);
1354 } else {
1355 return -1;
1356 }
1357 s->index_built = 1;
1358 }
1359
1360 st = s->streams[stream_index];
1361 index = av_index_search_timestamp(st, timestamp, flags);
1362 if (index < 0)
1363 return -1;
1364
1365 /* now we have found the index, we can seek */
1366 ie = &st->index_entries[index];
1367 av_read_frame_flush(s);
1368 url_fseek(&s->pb, ie->pos, SEEK_SET);
1369
1370 av_update_cur_dts(s, st, ie->timestamp);
1371
1372 return 0;
1373}
1374
1375/**
1376 * Seek to the keyframe at 'timestamp' in the stream identified by
1377 * 'stream_index'.
1378 * @param stream_index If stream_index is (-1), a default
1379 * stream is selected, and timestamp is automatically converted
1380 * from AV_TIME_BASE units to the stream specific time_base.
1381 * @param timestamp timestamp in AVStream.time_base units
1382 * or if there is no stream specified then in AV_TIME_BASE units
1383 * @param flags flags which select direction and seeking mode
1384 * @return >= 0 on success
1385 */
1386int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1387{
1388 int ret;
1389 AVStream *st;
1390
1391 av_read_frame_flush(s);
1392
1393 if(flags & AVSEEK_FLAG_BYTE)
1394 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1395
1396 if(stream_index < 0){
1397 stream_index= av_find_default_stream_index(s);
1398 if(stream_index < 0)
1399 return -1;
1400
1401 st= s->streams[stream_index];
1402 /* timestamp for default must be expressed in AV_TIME_BASE units */
1403 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1404 }
1405 st= s->streams[stream_index];
1406
1407 /* first, we try the format specific seek */
1408 if (s->iformat->read_seek)
1409 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1410 else
1411 ret = -1;
1412 if (ret >= 0) {
1413 return 0;
1414 }
1415
1416 if(s->iformat->read_timestamp)
1417 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1418 else
1419 return av_seek_frame_generic(s, stream_index, timestamp, flags);
1420}
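/* Sketch of an application seeking to roughly 10 seconds, letting the default
 * stream (-1) trigger the AV_TIME_BASE conversion described above: */
#if 0
static int seek_example(AVFormatContext *ic)
{
    int64_t target = 10 * (int64_t)AV_TIME_BASE;   /* 10 seconds in AV_TIME_BASE units */

    return av_seek_frame(ic, -1, target, AVSEEK_FLAG_BACKWARD);
}
#endif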
1421
1422/*******************************************************/
1423
1424/**
1425 * Returns TRUE if the file has accurate timings in any stream.
1426 *
1427 * @return TRUE if the stream has accurate timings for at least one component.
1428 */
1429static int av_has_timings(AVFormatContext *ic)
1430{
1431 int i;
1432 AVStream *st;
1433
1434 for(i = 0;i < ic->nb_streams; i++) {
1435 st = ic->streams[i];
1436 if (st->start_time != AV_NOPTS_VALUE &&
1437 st->duration != AV_NOPTS_VALUE)
1438 return 1;
1439 }
1440 return 0;
1441}
1442
1443/**
1444 * Estimate the stream timings from those of each component.
1445 *
1446 * Also computes the global bitrate if possible.
1447 */
1448static void av_update_stream_timings(AVFormatContext *ic)
1449{
1450 int64_t start_time, start_time1, end_time, end_time1;
1451 int i;
1452 AVStream *st;
1453
1454 start_time = MAXINT64;
1455 end_time = MININT64;
1456 for(i = 0;i < ic->nb_streams; i++) {
1457 st = ic->streams[i];
1458 if (st->start_time != AV_NOPTS_VALUE) {
1459 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1460 if (start_time1 < start_time)
1461 start_time = start_time1;
1462 if (st->duration != AV_NOPTS_VALUE) {
1463 end_time1 = start_time1
1464 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1465 if (end_time1 > end_time)
1466 end_time = end_time1;
1467 }
1468 }
1469 }
1470 if (start_time != MAXINT64) {
1471 ic->start_time = start_time;
1472 if (end_time != MININT64) {
1473 ic->duration = end_time - start_time;
1474 if (ic->file_size > 0) {
1475 /* compute the bit rate */
1476 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1477 (double)ic->duration;
1478 }
1479 }
1480 }
1481
1482}
1483
1484static void fill_all_stream_timings(AVFormatContext *ic)
1485{
1486 int i;
1487 AVStream *st;
1488
1489 av_update_stream_timings(ic);
1490 for(i = 0;i < ic->nb_streams; i++) {
1491 st = ic->streams[i];
1492 if (st->start_time == AV_NOPTS_VALUE) {
1493 if(ic->start_time != AV_NOPTS_VALUE)
1494 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1495 if(ic->duration != AV_NOPTS_VALUE)
1496 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
1497 }
1498 }
1499}
1500
1501static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1502{
1503 int64_t filesize, duration;
1504 int bit_rate, i;
1505 AVStream *st;
1506
1507 /* if bit_rate is already set, we believe it */
1508 if (ic->bit_rate == 0) {
1509 bit_rate = 0;
1510 for(i=0;i<ic->nb_streams;i++) {
1511 st = ic->streams[i];
1512 bit_rate += st->codec->bit_rate;
1513 }
1514 ic->bit_rate = bit_rate;
1515 }
1516
1517 /* if duration is already set, we believe it */
1518 if (ic->duration == AV_NOPTS_VALUE &&
1519 ic->bit_rate != 0 &&
1520 ic->file_size != 0) {
1521 filesize = ic->file_size;
1522 if (filesize > 0) {
1523 for(i = 0; i < ic->nb_streams; i++) {
1524 st = ic->streams[i];
1525 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1526 if (st->start_time == AV_NOPTS_VALUE ||
1527 st->duration == AV_NOPTS_VALUE) {
1528 st->start_time = 0;
1529 st->duration = duration;
1530 }
1531 }
1532 }
1533 }
1534}
1535
1536#define DURATION_MAX_READ_SIZE 250000
1537
1538/* only usable for MPEG-PS streams */
1539static void av_estimate_timings_from_pts(AVFormatContext *ic)
1540{
1541 AVPacket pkt1, *pkt = &pkt1;
1542 AVStream *st;
1543 int read_size, i, ret;
1544 int64_t end_time;
1545 int64_t filesize, offset, duration;
1546
1547 /* free previous packet */
1548 if (ic->cur_st && ic->cur_st->parser)
1549 av_free_packet(&ic->cur_pkt);
1550 ic->cur_st = NULL;
1551
1552 /* flush packet queue */
1553 flush_packet_queue(ic);
1554
1555 for(i=0;i<ic->nb_streams;i++) {
1556 st = ic->streams[i];
1557 if (st->parser) {
1558 av_parser_close(st->parser);
1559 st->parser= NULL;
1560 }
1561 }
1562
1563 /* we read the first packets to get the first PTS (not fully
1564 accurate, but it is enough now) */
1565 url_fseek(&ic->pb, 0, SEEK_SET);
1566 read_size = 0;
1567 for(;;) {
1568 if (read_size >= DURATION_MAX_READ_SIZE)
1569 break;
1570 /* if all info is available, we can stop */
1571 for(i = 0;i < ic->nb_streams; i++) {
1572 st = ic->streams[i];
1573 if (st->start_time == AV_NOPTS_VALUE)
1574 break;
1575 }
1576 if (i == ic->nb_streams)
1577 break;
1578
1579 ret = av_read_packet(ic, pkt);
1580 if (ret != 0)
1581 break;
1582 read_size += pkt->size;
1583 st = ic->streams[pkt->stream_index];
1584 if (pkt->pts != AV_NOPTS_VALUE) {
1585 if (st->start_time == AV_NOPTS_VALUE)
1586 st->start_time = pkt->pts;
1587 }
1588 av_free_packet(pkt);
1589 }
1590
1591 /* estimate the end time (duration) */
1592 /* XXX: may need to support wrapping */
1593 filesize = ic->file_size;
1594 offset = filesize - DURATION_MAX_READ_SIZE;
1595 if (offset < 0)
1596 offset = 0;
1597
1598 url_fseek(&ic->pb, offset, SEEK_SET);
1599 read_size = 0;
1600 for(;;) {
1601 if (read_size >= DURATION_MAX_READ_SIZE)
1602 break;
1603 /* if all info is available, we can stop */
1604 for(i = 0;i < ic->nb_streams; i++) {
1605 st = ic->streams[i];
1606 if (st->duration == AV_NOPTS_VALUE)
1607 break;
1608 }
1609 if (i == ic->nb_streams)
1610 break;
1611
1612 ret = av_read_packet(ic, pkt);
1613 if (ret != 0)
1614 break;
1615 read_size += pkt->size;
1616 st = ic->streams[pkt->stream_index];
1617 if (pkt->pts != AV_NOPTS_VALUE) {
1618 end_time = pkt->pts;
1619 duration = end_time - st->start_time;
1620 if (duration > 0) {
1621 if (st->duration == AV_NOPTS_VALUE ||
1622 st->duration < duration)
1623 st->duration = duration;
1624 }
1625 }
1626 av_free_packet(pkt);
1627 }
1628
1629 fill_all_stream_timings(ic);
1630
1631 url_fseek(&ic->pb, 0, SEEK_SET);
1632}
1633
1634static void av_estimate_timings(AVFormatContext *ic)
1635{
1636 int64_t file_size;
1637
1638 /* get the file size, if possible */
1639 if (ic->iformat->flags & AVFMT_NOFILE) {
1640 file_size = 0;
1641 } else {
1642 file_size = url_fsize(&ic->pb);
1643 if (file_size < 0)
1644 file_size = 0;
1645 }
1646 ic->file_size = file_size;
1647
1648 if ((!strcmp(ic->iformat->name, "mpeg") ||
1649 !strcmp(ic->iformat->name, "mpegts")) &&
1650 file_size && !ic->pb.is_streamed) {
1651 /* get accurate estimate from the PTSes */
1652 av_estimate_timings_from_pts(ic);
1653 } else if (av_has_timings(ic)) {
1654 /* at least one component has timings - we use them for all
1655 the components */
1656 fill_all_stream_timings(ic);
1657 } else {
1658 /* less precise: use bit rate info */
1659 av_estimate_timings_from_bit_rate(ic);
1660 }
1661 av_update_stream_timings(ic);
1662
1663#if 0
1664 {
1665 int i;
1666 AVStream *st;
1667 for(i = 0;i < ic->nb_streams; i++) {
1668 st = ic->streams[i];
1669 printf("%d: start_time: %0.3f duration: %0.3f\n",
1670 i, (double)st->start_time / AV_TIME_BASE,
1671 (double)st->duration / AV_TIME_BASE);
1672 }
1673 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1674 (double)ic->start_time / AV_TIME_BASE,
1675 (double)ic->duration / AV_TIME_BASE,
1676 ic->bit_rate / 1000);
1677 }
1678#endif
1679}
1680
1681static int has_codec_parameters(AVCodecContext *enc)
1682{
1683 int val;
1684 switch(enc->codec_type) {
1685 case CODEC_TYPE_AUDIO:
1686 val = enc->sample_rate;
1687 break;
1688 case CODEC_TYPE_VIDEO:
1689 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
1690 break;
1691 default:
1692 val = 1;
1693 break;
1694 }
1695 return (val != 0);
1696}
1697
1698static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
1699{
1700 int16_t *samples;
1701 AVCodec *codec;
1702 int got_picture, ret=0;
1703 AVFrame picture;
1704
1705 if(!st->codec->codec){
1706 codec = avcodec_find_decoder(st->codec->codec_id);
1707 if (!codec)
1708 return -1;
1709 ret = avcodec_open(st->codec, codec);
1710 if (ret < 0)
1711 return ret;
1712 }
1713
1714 if(!has_codec_parameters(st->codec)){
1715 switch(st->codec->codec_type) {
1716 case CODEC_TYPE_VIDEO:
1717 ret = avcodec_decode_video(st->codec, &picture,
1718 &got_picture, (uint8_t *)data, size);
1719 break;
1720 case CODEC_TYPE_AUDIO:
1721 samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
1722 if (!samples)
1723 goto fail;
1724 ret = avcodec_decode_audio(st->codec, samples,
1725 &got_picture, (uint8_t *)data, size);
1726 av_free(samples);
1727 break;
1728 default:
1729 break;
1730 }
1731 }
1732 fail:
1733 return ret;
1734}
1735
1736/* absolute maximum size we read until we abort */
1737#define MAX_READ_SIZE 5000000
1738
1739/* maximum duration until we stop analysing the stream */
1740#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
1741
1742/**
1743 * Read the beginning of a media file to get stream information. This
1744 * is useful for file formats with no headers such as MPEG. This
1745 * function also computes the real frame rate in case of MPEG-2 repeat
1746 * frame mode.
1747 *
1748 * @param ic media file handle
1749 * @return >=0 if OK. AVERROR_xxx if error.
1750 * @todo let user decide somehow what information is needed so we don't waste time getting stuff the user doesn't need
1751 */
1752int av_find_stream_info(AVFormatContext *ic)
1753{
1754 int i, count, ret, read_size, j;
1755 AVStream *st;
1756 AVPacket pkt1, *pkt;
1757 AVPacketList *pktl=NULL, **ppktl;
1758 int64_t last_dts[MAX_STREAMS];
1759 int64_t duration_sum[MAX_STREAMS];
1760 int duration_count[MAX_STREAMS]={0};
1761
1762 for(i=0;i<ic->nb_streams;i++) {
1763 st = ic->streams[i];
1764 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
1765/* if(!st->time_base.num)
1766 st->time_base= */
1767 if(!st->codec->time_base.num)
1768 st->codec->time_base= st->time_base;
1769 }
1770 //only for the split stuff
1771 if (!st->parser) {
1772 st->parser = av_parser_init(st->codec->codec_id);
1773 if(st->need_parsing == 2 && st->parser){
1774 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1775 }
1776 }
1777 }
1778
1779 for(i=0;i<MAX_STREAMS;i++){
1780 last_dts[i]= AV_NOPTS_VALUE;
1781 duration_sum[i]= INT64_MAX;
1782 }
1783
1784 count = 0;
1785 read_size = 0;
1786 ppktl = &ic->packet_buffer;
1787 for(;;) {
1788 /* check if one codec still needs to be handled */
1789 for(i=0;i<ic->nb_streams;i++) {
1790 st = ic->streams[i];
1791 if (!has_codec_parameters(st->codec))
1792 break;
1793 /* variable fps and no guess at the real fps */
1794 if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
1795 && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
1796 break;
1797 if(st->parser && st->parser->parser->split && !st->codec->extradata)
1798 break;
1799 }
1800 if (i == ic->nb_streams) {
1801 /* NOTE: if the format has no header, then we need to read
1802 some packets to get most of the streams, so we cannot
1803 stop here */
1804 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
1805 /* if we found the info for all the codecs, we can stop */
1806 ret = count;
1807 break;
1808 }
1809 }
1810 /* we did not get all the codec info, but we read too much data */
1811 if (read_size >= MAX_READ_SIZE) {
1812 ret = count;
1813 break;
1814 }
1815
1816 /* NOTE: a new stream can be added there if no header in file
1817 (AVFMTCTX_NOHEADER) */
1818 ret = av_read_frame_internal(ic, &pkt1);
1819 if (ret < 0) {
1820 /* EOF or error */
1821 ret = -1; /* we could not have all the codec parameters before EOF */
1822 for(i=0;i<ic->nb_streams;i++) {
1823 st = ic->streams[i];
1824 if (!has_codec_parameters(st->codec)){
1825 char buf[256];
1826 avcodec_string(buf, sizeof(buf), st->codec, 0);
1827 av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
1828 } else {
1829 ret = 0;
1830 }
1831 }
1832 break;
1833 }
1834
1835 pktl = av_mallocz(sizeof(AVPacketList));
1836 if (!pktl) {
1837 ret = AVERROR_NOMEM;
1838 break;
1839 }
1840
1841 /* add the packet in the buffered packet list */
1842 *ppktl = pktl;
1843 ppktl = &pktl->next;
1844
1845 pkt = &pktl->pkt;
1846 *pkt = pkt1;
1847
1848 /* duplicate the packet */
1849 if (av_dup_packet(pkt) < 0) {
1850 ret = AVERROR_NOMEM;
1851 break;
1852 }
1853
1854 read_size += pkt->size;
1855
1856 st = ic->streams[pkt->stream_index];
1857 st->codec_info_duration += pkt->duration;
1858 if (pkt->duration != 0)
1859 st->codec_info_nb_frames++;
1860
1861 {
1862 int index= pkt->stream_index;
1863 int64_t last= last_dts[index];
1864 int64_t duration= pkt->dts - last;
1865
1866 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
1867 if(duration*duration_count[index]*10/9 < duration_sum[index]){
1868 duration_sum[index]= duration;
1869 duration_count[index]=1;
1870 }else{
1871 int factor= av_rescale(2*duration, duration_count[index], duration_sum[index]);
1872 if(factor==3)
1873 duration_count[index] *= 2;
1874 factor= av_rescale(duration, duration_count[index], duration_sum[index]);
1875 duration_sum[index] += duration;
1876 duration_count[index]+= factor;
1877 }
1878 if(st->codec_info_nb_frames == 0 && 0)
1879 st->codec_info_duration += duration;
1880 }
1881 last_dts[pkt->stream_index]= pkt->dts;
1882 }
1883 if(st->parser && st->parser->parser->split && !st->codec->extradata){
1884 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
1885 if(i){
1886 st->codec->extradata_size= i;
1887 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
1888 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
1889 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
1890 }
1891 }
1892
1893 /* if still no information, we try to open the codec and to
1894 decompress the frame. We try to avoid that in most cases as
1895 it takes longer and uses more memory. For MPEG4, we need to
1896 decompress for Quicktime. */
1897 if (!has_codec_parameters(st->codec) /*&&
1898 (st->codec->codec_id == CODEC_ID_FLV1 ||
1899 st->codec->codec_id == CODEC_ID_H264 ||
1900 st->codec->codec_id == CODEC_ID_H263 ||
1901 st->codec->codec_id == CODEC_ID_H261 ||
1902 st->codec->codec_id == CODEC_ID_VORBIS ||
1903 st->codec->codec_id == CODEC_ID_MJPEG ||
1904 st->codec->codec_id == CODEC_ID_PNG ||
1905 st->codec->codec_id == CODEC_ID_PAM ||
1906 st->codec->codec_id == CODEC_ID_PGM ||
1907 st->codec->codec_id == CODEC_ID_PGMYUV ||
1908 st->codec->codec_id == CODEC_ID_PBM ||
1909 st->codec->codec_id == CODEC_ID_PPM ||
1910 st->codec->codec_id == CODEC_ID_SHORTEN ||
1911 (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
1912 try_decode_frame(st, pkt->data, pkt->size);
1913
1914 if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
1915 break;
1916 }
1917 count++;
1918 }
1919
1920 // close codecs which were opened in try_decode_frame()
1921 for(i=0;i<ic->nb_streams;i++) {
1922 st = ic->streams[i];
1923 if(st->codec->codec)
1924 avcodec_close(st->codec);
1925 }
1926 for(i=0;i<ic->nb_streams;i++) {
1927 st = ic->streams[i];
1928 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1929 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
1930 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
1931
1932 if(duration_count[i]
1933 && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
1934 //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
1935 st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
1936 int64_t num, den, error, best_error;
1937
1938 num= st->time_base.den*duration_count[i];
1939 den= st->time_base.num*duration_sum[i];
1940
1941 best_error= INT64_MAX;
1942 for(j=1; j<60*12; j++){
1943 error= FFABS(1001*12*num - 1001*j*den);
1944 if(error < best_error){
1945 best_error= error;
1946 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
1947 }
1948 }
1949 for(j=0; j<3; j++){
1950 static const int ticks[]= {24,30,60};
1951 error= FFABS(1001*12*num - 1000*12*den * ticks[j]);
1952 if(error < best_error){
1953 best_error= error;
1954 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, ticks[j]*1000, 1001, INT_MAX);
1955 }
1956 }
1957 }
1958
1959 if (!st->r_frame_rate.num){
1960 if( st->codec->time_base.den * (int64_t)st->time_base.num
1961 <= st->codec->time_base.num * (int64_t)st->time_base.den){
1962 st->r_frame_rate.num = st->codec->time_base.den;
1963 st->r_frame_rate.den = st->codec->time_base.num;
1964 }else{
1965 st->r_frame_rate.num = st->time_base.den;
1966 st->r_frame_rate.den = st->time_base.num;
1967 }
1968 }
1969 }
1970 }
1971
1972 av_estimate_timings(ic);
1973#if 0
1974 /* correct DTS for b frame streams with no timestamps */
1975 for(i=0;i<ic->nb_streams;i++) {
1976 st = ic->streams[i];
1977 if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
1978 if(b-frames){
1979 ppktl = &ic->packet_buffer;
1980 while(ppkt1){
1981 if(ppkt1->stream_index != i)
1982 continue;
1983 if(ppkt1->pkt->dts < 0)
1984 break;
1985 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
1986 break;
1987 ppkt1->pkt->dts -= delta;
1988 ppkt1= ppkt1->next;
1989 }
1990 if(ppkt1)
1991 continue;
1992 st->cur_dts -= delta;
1993 }
1994 }
1995 }
1996#endif
1997 return ret;
1998}
1999
2000/*******************************************************/
2001
2002/**
2003 * Start playing a network-based stream (e.g. an RTSP stream) at the
2004 * current position.
2005 */
2006int av_read_play(AVFormatContext *s)
2007{
2008 if (!s->iformat->read_play)
2009 return AVERROR_NOTSUPP;
2010 return s->iformat->read_play(s);
2011}
2012
2013/**
2014 * Pause a network-based stream (e.g. an RTSP stream).
2015 *
2016 * Use av_read_play() to resume it.
2017 */
2018int av_read_pause(AVFormatContext *s)
2019{
2020 if (!s->iformat->read_pause)
2021 return AVERROR_NOTSUPP;
2022 return s->iformat->read_pause(s);
2023}
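
/*
 * Example (sketch): pausing and resuming a network stream, assuming 'ic' is
 * an AVFormatContext opened with av_open_input_file():
 *
 * @code
 * if (av_read_pause(ic) == AVERROR_NOTSUPP)
 *     av_log(NULL, AV_LOG_INFO, "this demuxer cannot be paused\n");
 * // ... later ...
 * av_read_play(ic); // resume from the current position
 * @endcode
 */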
2024
2025/**
2026 * Close a media file (but not its codecs).
2027 *
2028 * @param s media file handle
2029 */
2030void av_close_input_file(AVFormatContext *s)
2031{
2032 int i, must_open_file;
2033 AVStream *st;
2034
2035 /* free previous packet */
2036 if (s->cur_st && s->cur_st->parser)
2037 av_free_packet(&s->cur_pkt);
2038
2039 if (s->iformat->read_close)
2040 s->iformat->read_close(s);
2041 for(i=0;i<s->nb_streams;i++) {
2042 /* free all data in a stream component */
2043 st = s->streams[i];
2044 if (st->parser) {
2045 av_parser_close(st->parser);
2046 }
2047 av_free(st->index_entries);
2048 av_free(st->codec->extradata);
2049 av_free(st->codec);
2050 av_free(st);
2051 }
2052 flush_packet_queue(s);
2053 must_open_file = 1;
2054 if (s->iformat->flags & AVFMT_NOFILE) {
2055 must_open_file = 0;
2056 }
2057 if (must_open_file) {
2058 url_fclose(&s->pb);
2059 }
2060 av_freep(&s->priv_data);
2061 av_free(s);
2062}
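
/*
 * Typical demuxing sequence ending in av_close_input_file() (sketch; error
 * handling mostly omitted, "input.mpg" is a placeholder):
 *
 * @code
 * AVFormatContext *ic;
 * AVPacket pkt;
 *
 * av_register_all();
 * if (av_open_input_file(&ic, "input.mpg", NULL, 0, NULL) < 0)
 *     return -1;
 * if (av_find_stream_info(ic) < 0)
 *     return -1;
 * while (av_read_frame(ic, &pkt) >= 0) {
 *     // use pkt.data / pkt.size; pkt.stream_index selects the stream
 *     av_free_packet(&pkt);
 * }
 * av_close_input_file(ic);
 * @endcode
 */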
2063
2064/**
2065 * Add a new stream to a media file.
2066 *
2067 * Can only be called in the read_header() function. If the flag
2068 * AVFMTCTX_NOHEADER is in the format context, then new streams
2069 * can be added in read_packet too.
2070 *
2071 * @param s media file handle
2072 * @param id file format dependent stream id
2073 */
2074AVStream *av_new_stream(AVFormatContext *s, int id)
2075{
2076 AVStream *st;
2077 int i;
2078
2079 if (s->nb_streams >= MAX_STREAMS)
2080 return NULL;
2081
2082 st = av_mallocz(sizeof(AVStream));
2083 if (!st)
2084 return NULL;
2085
2086 st->codec= avcodec_alloc_context();
2087 if (s->iformat) {
2088 /* no default bitrate if decoding */
2089 st->codec->bit_rate = 0;
2090 }
2091 st->index = s->nb_streams;
2092 st->id = id;
2093 st->start_time = AV_NOPTS_VALUE;
2094 st->duration = AV_NOPTS_VALUE;
2095 st->cur_dts = AV_NOPTS_VALUE;
2096
2097 /* default pts setting is MPEG-like */
2098 av_set_pts_info(st, 33, 1, 90000);
2099 st->last_IP_pts = AV_NOPTS_VALUE;
2100 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2101 st->pts_buffer[i]= AV_NOPTS_VALUE;
2102
2103 s->streams[s->nb_streams++] = st;
2104 return st;
2105}
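
/*
 * Sketch of how a (hypothetical) demuxer's read_header() would use
 * av_new_stream(), here for raw 16-bit stereo PCM:
 *
 * @code
 * static int pcm_read_header(AVFormatContext *s, AVFormatParameters *ap)
 * {
 *     AVStream *st = av_new_stream(s, 0);
 *     if (!st)
 *         return AVERROR_NOMEM;
 *     st->codec->codec_type  = CODEC_TYPE_AUDIO;
 *     st->codec->codec_id    = CODEC_ID_PCM_S16LE;
 *     st->codec->sample_rate = 44100;
 *     st->codec->channels    = 2;
 *     av_set_pts_info(st, 64, 1, st->codec->sample_rate);
 *     return 0;
 * }
 * @endcode
 */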
2106
2107/************************************************************/
2108/* output media file */
2109
2110int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2111{
2112 int ret;
2113
2114 if (s->oformat->priv_data_size > 0) {
2115 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2116 if (!s->priv_data)
2117 return AVERROR_NOMEM;
2118 } else
2119 s->priv_data = NULL;
2120
2121 if (s->oformat->set_parameters) {
2122 ret = s->oformat->set_parameters(s, ap);
2123 if (ret < 0)
2124 return ret;
2125 }
2126 return 0;
2127}
2128
2129/**
2130 * allocate the stream private data and write the stream header to an
2131 * output media file
2132 *
2133 * @param s media file handle
2134 * @return 0 if OK. AVERROR_xxx if error.
2135 */
2136int av_write_header(AVFormatContext *s)
2137{
2138 int ret, i;
2139 AVStream *st;
2140
2141 // some sanity checks
2142 for(i=0;i<s->nb_streams;i++) {
2143 st = s->streams[i];
2144
2145 switch (st->codec->codec_type) {
2146 case CODEC_TYPE_AUDIO:
2147 if(st->codec->sample_rate<=0){
2148 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2149 return -1;
2150 }
2151 break;
2152 case CODEC_TYPE_VIDEO:
2153 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2154 av_log(s, AV_LOG_ERROR, "time base not set\n");
2155 return -1;
2156 }
2157 if(st->codec->width<=0 || st->codec->height<=0){
2158 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2159 return -1;
2160 }
2161 break;
2162 }
2163 }
2164
2165 if(s->oformat->write_header){
2166 ret = s->oformat->write_header(s);
2167 if (ret < 0)
2168 return ret;
2169 }
2170
2171 /* init PTS generation */
2172 for(i=0;i<s->nb_streams;i++) {
2173 int64_t den = AV_NOPTS_VALUE;
2174 st = s->streams[i];
2175
2176 switch (st->codec->codec_type) {
2177 case CODEC_TYPE_AUDIO:
2178 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2179 break;
2180 case CODEC_TYPE_VIDEO:
2181 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2182 break;
2183 default:
2184 break;
2185 }
2186 if (den != AV_NOPTS_VALUE) {
2187 if (den <= 0)
2188 return AVERROR_INVALIDDATA;
2189 av_frac_init(&st->pts, 0, 0, den);
2190 }
2191 }
2192 return 0;
2193}
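
/*
 * Sketch of a muxing setup up to av_write_header() ("out.mpg" and the stream
 * parameters are placeholders; error handling omitted):
 *
 * @code
 * AVOutputFormat *fmt = guess_format(NULL, "out.mpg", NULL);
 * AVFormatContext *oc = av_alloc_format_context();
 * AVStream *st;
 *
 * oc->oformat = fmt;
 * pstrcpy(oc->filename, sizeof(oc->filename), "out.mpg");
 * st = av_new_stream(oc, 0);
 * // ... fill st->codec: codec_type, codec_id, time_base,
 * //     width/height or sample_rate/channels ...
 * av_set_parameters(oc, NULL);
 * if (!(fmt->flags & AVFMT_NOFILE))
 *     url_fopen(&oc->pb, "out.mpg", URL_WRONLY);
 * av_write_header(oc);
 * @endcode
 */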
2194
2195//FIXME merge with compute_pkt_fields
2196static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
2197 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2198 int num, den, frame_size, i;
2199
2200// av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2201
2202/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2203 return -1;*/
2204
2205 /* duration field */
2206 if (pkt->duration == 0) {
2207 compute_frame_duration(&num, &den, st, NULL, pkt);
2208 if (den && num) {
2209 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
2210 }
2211 }
2212
2213 //XXX/FIXME this is a temporary hack until all encoders output pts
2214 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2215 pkt->dts=
2216// pkt->pts= st->cur_dts;
2217 pkt->pts= st->pts.val;
2218 }
2219
2220 //calculate dts from pts
2221 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
2222 st->pts_buffer[0]= pkt->pts;
2223 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2224 st->pts_buffer[i]= (i-delay-1) * pkt->duration;
2225 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2226 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2227
2228 pkt->dts= st->pts_buffer[0];
2229 }
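
    /* worked example: with delay=1 and duration=1, pts arriving as
       1,3,2,5,4 (decode order) gives dts -1,1,2,3,4 -- each dts is <= its
       pts and the dts sequence stays monotonically increasing */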
2230
2231 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2232 av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
2233 return -1;
2234 }
2235 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2236 av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
2237 return -1;
2238 }
2239
2240// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2241 st->cur_dts= pkt->dts;
2242 st->pts.val= pkt->dts;
2243
2244 /* update pts */
2245 switch (st->codec->codec_type) {
2246 case CODEC_TYPE_AUDIO:
2247 frame_size = get_audio_frame_size(st->codec, pkt->size);
2248
2249 /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
2250 but it would be better if we had the real timestamps from the encoder */
2251 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2252 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2253 }
2254 break;
2255 case CODEC_TYPE_VIDEO:
2256 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
2257 break;
2258 default:
2259 break;
2260 }
2261 return 0;
2262}
2263
2264static void truncate_ts(AVStream *st, AVPacket *pkt){
2265 int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
2266
2267// if(pkt->dts < 0)
2268// pkt->dts= 0; //this happens for low_delay=0 and B-frames, FIXME, needs further investigation about what we should do here
2269
2270 pkt->pts &= pts_mask;
2271 pkt->dts &= pts_mask;
2272}
2273
2274/**
2275 * Write a packet to an output media file.
2276 *
2277 * The packet shall contain one audio or video frame.
2278 *
2279 * @param s media file handle
2280 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2281 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2282 */
2283int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2284{
2285 int ret;
2286
2287 ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
2288 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2289 return ret;
2290
2291 truncate_ts(s->streams[pkt->stream_index], pkt);
2292
2293 ret= s->oformat->write_packet(s, pkt);
2294 if(!ret)
2295 ret= url_ferror(&s->pb);
2296 return ret;
2297}
2298
2299/**
2300 * Interleave a packet per DTS in an output media file.
2301 *
2302 * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
2303 * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
2304 *
2305 * @param s media file handle
2306 * @param out the interleaved packet will be output here
2307 * @param in the input packet
2308 * @param flush 1 if no further packets are available as input and all
2309 * remaining packets should be output
2310 * @return 1 if a packet was output, 0 if no packet could be output,
2311 * < 0 if an error occurred
2312 */
2313int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2314 AVPacketList *pktl, **next_point, *this_pktl;
2315 int stream_count=0;
2316 int streams[MAX_STREAMS];
2317
2318 if(pkt){
2319 AVStream *st= s->streams[ pkt->stream_index];
2320
2321// assert(pkt->destruct != av_destruct_packet); //FIXME
2322
2323 this_pktl = av_mallocz(sizeof(AVPacketList));
2324 this_pktl->pkt= *pkt;
2325 if(pkt->destruct == av_destruct_packet)
2326 pkt->destruct= NULL; // non shared -> must keep original from being freed
2327 else
2328 av_dup_packet(&this_pktl->pkt); //shared -> must dup
2329
2330 next_point = &s->packet_buffer;
2331 while(*next_point){
2332 AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
2333 int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
2334 int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
2335 if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
2336 break;
2337 next_point= &(*next_point)->next;
2338 }
2339 this_pktl->next= *next_point;
2340 *next_point= this_pktl;
2341 }
2342
2343 memset(streams, 0, sizeof(streams));
2344 pktl= s->packet_buffer;
2345 while(pktl){
2346//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
2347 if(streams[ pktl->pkt.stream_index ] == 0)
2348 stream_count++;
2349 streams[ pktl->pkt.stream_index ]++;
2350 pktl= pktl->next;
2351 }
2352
2353 if(s->nb_streams == stream_count || (flush && stream_count)){
2354 pktl= s->packet_buffer;
2355 *out= pktl->pkt;
2356
2357 s->packet_buffer= pktl->next;
2358 av_freep(&pktl);
2359 return 1;
2360 }else{
2361 av_init_packet(out);
2362 return 0;
2363 }
2364}
2365
2366/**
2367 * Interleaves an AVPacket correctly so it can be muxed.
2368 * @param out the interleaved packet will be output here
2369 * @param in the input packet
2370 * @param flush 1 if no further packets are available as input and all
2371 * remaining packets should be output
2372 * @return 1 if a packet was output, 0 if no packet could be output,
2373 * < 0 if an error occurred
2374 */
2375static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2376 if(s->oformat->interleave_packet)
2377 return s->oformat->interleave_packet(s, out, in, flush);
2378 else
2379 return av_interleave_packet_per_dts(s, out, in, flush);
2380}
2381
2382/**
2383 * Writes a packet to an output media file ensuring correct interleaving.
2384 *
2385 * The packet must contain one audio or video frame.
2386 * If the packets are already correctly interleaved, the application should
2387 * call av_write_frame() instead, as it is slightly faster. It is also important
2388 * to keep in mind that completely non-interleaved input will need huge amounts
2389 * of memory to interleave with this, so it is preferable to interleave at the
2390 * demuxer level.
2391 *
2392 * @param s media file handle
2393 * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
2394 * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
2395 */
2396int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2397 AVStream *st= s->streams[ pkt->stream_index];
2398
2399 //FIXME/XXX/HACK drop zero sized packets
2400 if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
2401 return 0;
2402
2403//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2404 if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2405 return -1;
2406
2407 if(pkt->dts == AV_NOPTS_VALUE)
2408 return -1;
2409
2410 for(;;){
2411 AVPacket opkt;
2412 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2413 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2414 return ret;
2415
2416 truncate_ts(s->streams[opkt.stream_index], &opkt);
2417 ret= s->oformat->write_packet(s, &opkt);
2418
2419 av_free_packet(&opkt);
2420 pkt= NULL;
2421
2422 if(ret<0)
2423 return ret;
2424 if(url_ferror(&s->pb))
2425 return url_ferror(&s->pb);
2426 }
2427}
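
/*
 * Sketch of an encode-and-mux step using av_interleaved_write_frame()
 * ('oc', 'st', 'enc', 'picture' and the output buffer are assumed to have
 * been set up elsewhere; error handling omitted):
 *
 * @code
 * AVPacket pkt;
 *
 * av_init_packet(&pkt);
 * pkt.stream_index = st->index;
 * pkt.data = outbuf;
 * pkt.size = avcodec_encode_video(enc, outbuf, outbuf_size, picture);
 * if (enc->coded_frame->pts != AV_NOPTS_VALUE)
 *     pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, st->time_base);
 * if (enc->coded_frame->key_frame)
 *     pkt.flags |= PKT_FLAG_KEY;
 * if (pkt.size > 0)
 *     av_interleaved_write_frame(oc, &pkt);
 * @endcode
 */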
2428
2429/**
2430 * @brief Write the stream trailer to an output media file and
2431 * free the file private data.
2432 *
2433 * @param s media file handle
2434 * @return 0 if OK. AVERROR_xxx if error.
2435 */
2436int av_write_trailer(AVFormatContext *s)
2437{
2438 int ret, i;
2439
2440 for(;;){
2441 AVPacket pkt;
2442 ret= av_interleave_packet(s, &pkt, NULL, 1);
2443 if(ret<0) //FIXME cleanup needed for ret<0 ?
2444 goto fail;
2445 if(!ret)
2446 break;
2447
2448 truncate_ts(s->streams[pkt.stream_index], &pkt);
2449 ret= s->oformat->write_packet(s, &pkt);
2450
2451 av_free_packet(&pkt);
2452
2453 if(ret<0)
2454 goto fail;
2455 if(url_ferror(&s->pb))
2456 goto fail;
2457 }
2458
2459 if(s->oformat->write_trailer)
2460 ret = s->oformat->write_trailer(s);
2461fail:
2462 if(ret == 0)
2463 ret=url_ferror(&s->pb);
2464 for(i=0;i<s->nb_streams;i++)
2465 av_freep(&s->streams[i]->priv_data);
2466 av_freep(&s->priv_data);
2467 return ret;
2468}
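
/*
 * Sketch of finishing the muxing sequence started above (names as in the
 * av_write_header() example; error handling omitted):
 *
 * @code
 * av_write_trailer(oc);
 * for(i = 0; i < oc->nb_streams; i++) {
 *     av_freep(&oc->streams[i]->codec);
 *     av_freep(&oc->streams[i]);
 * }
 * if (!(fmt->flags & AVFMT_NOFILE))
 *     url_fclose(&oc->pb);
 * av_free(oc);
 * @endcode
 */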
2469
2470/* "user interface" functions */
2471
2472void dump_format(AVFormatContext *ic,
2473 int index,
2474 const char *url,
2475 int is_output)
2476{
2477 int i, flags;
2478 char buf[256];
2479
2480 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
2481 is_output ? "Output" : "Input",
2482 index,
2483 is_output ? ic->oformat->name : ic->iformat->name,
2484 is_output ? "to" : "from", url);
2485 if (!is_output) {
2486 av_log(NULL, AV_LOG_INFO, " Duration: ");
2487 if (ic->duration != AV_NOPTS_VALUE) {
2488 int hours, mins, secs, us;
2489 secs = ic->duration / AV_TIME_BASE;
2490 us = ic->duration % AV_TIME_BASE;
2491 mins = secs / 60;
2492 secs %= 60;
2493 hours = mins / 60;
2494 mins %= 60;
2495 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
2496 (10 * us) / AV_TIME_BASE);
2497 } else {
2498 av_log(NULL, AV_LOG_INFO, "N/A");
2499 }
2500 if (ic->start_time != AV_NOPTS_VALUE) {
2501 int secs, us;
2502 av_log(NULL, AV_LOG_INFO, ", start: ");
2503 secs = ic->start_time / AV_TIME_BASE;
2504 us = ic->start_time % AV_TIME_BASE;
2505 av_log(NULL, AV_LOG_INFO, "%d.%06d",
2506 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
2507 }
2508 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
2509 if (ic->bit_rate) {
2510 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
2511 } else {
2512 av_log(NULL, AV_LOG_INFO, "N/A");
2513 }
2514 av_log(NULL, AV_LOG_INFO, "\n");
2515 }
2516 for(i=0;i<ic->nb_streams;i++) {
2517 AVStream *st = ic->streams[i];
2518 int g= ff_gcd(st->time_base.num, st->time_base.den);
2519 avcodec_string(buf, sizeof(buf), st->codec, is_output);
2520 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
2521 /* the pid is important information, so we display it */
2522 /* XXX: add a generic system */
2523 if (is_output)
2524 flags = ic->oformat->flags;
2525 else
2526 flags = ic->iformat->flags;
2527 if (flags & AVFMT_SHOW_IDS) {
2528 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
2529 }
2530 if (strlen(st->language) > 0) {
2531 av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
2532 }
2533 av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
2534 av_log(NULL, AV_LOG_INFO, ": %s", buf);
2535 if(st->codec->codec_type == CODEC_TYPE_VIDEO){
2536 if(st->r_frame_rate.den && st->r_frame_rate.num)
2537 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
2538/* else if(st->time_base.den && st->time_base.num)
2539 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
2540 else
2541 av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
2542 }
2543 av_log(NULL, AV_LOG_INFO, "\n");
2544 }
2545}
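
/*
 * dump_format() is typically called right after the input has been opened
 * and probed (sketch):
 *
 * @code
 * AVFormatContext *ic;
 *
 * av_open_input_file(&ic, filename, NULL, 0, NULL);
 * av_find_stream_info(ic);
 * dump_format(ic, 0, filename, 0); // prints duration, bitrate and one line per stream
 * @endcode
 */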
2546
2547typedef struct {
2548 const char *abv;
2549 int width, height;
2550 int frame_rate, frame_rate_base;
2551} AbvEntry;
2552
2553static AbvEntry frame_abvs[] = {
2554 { "ntsc", 720, 480, 30000, 1001 },
2555 { "pal", 720, 576, 25, 1 },
2556 { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
2557 { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
2558 { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
2559 { "spal", 768, 576, 25, 1 }, /* square pixel pal */
2560 { "film", 352, 240, 24, 1 },
2561 { "ntsc-film", 352, 240, 24000, 1001 },
2562 { "sqcif", 128, 96, 0, 0 },
2563 { "qcif", 176, 144, 0, 0 },
2564 { "cif", 352, 288, 0, 0 },
2565 { "4cif", 704, 576, 0, 0 },
2566};
2567
2568/**
2569 * Parses width and height out of string str.
2570 */
2571int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
2572{
2573 int i;
2574 int n = sizeof(frame_abvs) / sizeof(AbvEntry);
2575 const char *p;
2576 int frame_width = 0, frame_height = 0;
2577
2578 for(i=0;i<n;i++) {
2579 if (!strcmp(frame_abvs[i].abv, str)) {
2580 frame_width = frame_abvs[i].width;
2581 frame_height = frame_abvs[i].height;
2582 break;
2583 }
2584 }
2585 if (i == n) {
2586 p = str;
2587 frame_width = strtol(p, (char **)&p, 10);
2588 if (*p)
2589 p++;
2590 frame_height = strtol(p, (char **)&p, 10);
2591 }
2592 if (frame_width <= 0 || frame_height <= 0)
2593 return -1;
2594 *width_ptr = frame_width;
2595 *height_ptr = frame_height;
2596 return 0;
2597}
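
/*
 * Examples (sketch):
 *
 * @code
 * int w, h;
 * parse_image_size(&w, &h, "640x480"); // w = 640, h = 480
 * parse_image_size(&w, &h, "cif");     // w = 352, h = 288 from the table above
 * @endcode
 */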
2598
2599/**
2600 * Converts frame rate from string to a fraction.
2601 *
2602 * First we try to get an exact integer or fractional frame rate.
2603 * If this fails we convert the frame rate to a double and return
2604 * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
2605 */
2606int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
2607{
2608 int i;
2609 char* cp;
2610
2611 /* First, we check our abbreviation table */
2612 for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
2613 if (!strcmp(frame_abvs[i].abv, arg)) {
2614 *frame_rate = frame_abvs[i].frame_rate;
2615 *frame_rate_base = frame_abvs[i].frame_rate_base;
2616 return 0;
2617 }
2618
2619 /* Then, we try to parse it as fraction */
2620 cp = strchr(arg, '/');
2621 if (!cp)
2622 cp = strchr(arg, ':');
2623 if (cp) {
2624 char* cpp;
2625 *frame_rate = strtol(arg, &cpp, 10);
2626 if (cpp != arg || cpp == cp)
2627 *frame_rate_base = strtol(cp+1, &cpp, 10);
2628 else
2629 *frame_rate = 0;
2630 }
2631 else {
2632 /* Finally we give up and parse it as double */
2633 AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
2634 *frame_rate_base = time_base.den;
2635 *frame_rate = time_base.num;
2636 }
2637 if (!*frame_rate || !*frame_rate_base)
2638 return -1;
2639 else
2640 return 0;
2641}
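
/*
 * Examples (sketch):
 *
 * @code
 * int rate, base;
 * parse_frame_rate(&rate, &base, "25");         // 25/1
 * parse_frame_rate(&rate, &base, "30000/1001"); // 30000/1001
 * parse_frame_rate(&rate, &base, "ntsc");       // 30000/1001 from the table above
 * @endcode
 */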
2642
2643/**
2644 * Converts a date string to the number of microseconds since Jan 1st, 1970.
2645 *
2646 * @code
2647 * Syntax:
2648 * - If not a duration:
2649 * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
2650 * Time is local time unless Z is appended, in which case it is interpreted as GMT.
2651 * The date is returned in microseconds since 1970.
2652 *
2653 * - If a duration:
2654 * HH[:MM[:SS[.m...]]]
2655 * S+[.m...]
2656 * @endcode
2657 */
2658#ifndef CONFIG_WINCE
2659int64_t parse_date(const char *datestr, int duration)
2660{
2661 const char *p;
2662 int64_t t;
2663 struct tm dt;
2664 int i;
2665 static const char *date_fmt[] = {
2666 "%Y-%m-%d",
2667 "%Y%m%d",
2668 };
2669 static const char *time_fmt[] = {
2670 "%H:%M:%S",
2671 "%H%M%S",
2672 };
2673 const char *q;
2674 int is_utc, len;
2675 char lastch;
2676 int negative = 0;
2677
2678#undef time
2679 time_t now = time(0);
2680
2681 len = strlen(datestr);
2682 if (len > 0)
2683 lastch = datestr[len - 1];
2684 else
2685 lastch = '\0';
2686 is_utc = (lastch == 'z' || lastch == 'Z');
2687
2688 memset(&dt, 0, sizeof(dt));
2689
2690 p = datestr;
2691 q = NULL;
2692 if (!duration) {
2693 for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
2694 q = small_strptime(p, date_fmt[i], &dt);
2695 if (q) {
2696 break;
2697 }
2698 }
2699
2700 if (!q) {
2701 if (is_utc) {
2702 dt = *gmtime(&now);
2703 } else {
2704 dt = *localtime(&now);
2705 }
2706 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
2707 } else {
2708 p = q;
2709 }
2710
2711 if (*p == 'T' || *p == 't' || *p == ' ')
2712 p++;
2713
2714 for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
2715 q = small_strptime(p, time_fmt[i], &dt);
2716 if (q) {
2717 break;
2718 }
2719 }
2720 } else {
2721 if (p[0] == '-') {
2722 negative = 1;
2723 ++p;
2724 }
2725 q = small_strptime(p, time_fmt[0], &dt);
2726 if (!q) {
2727 dt.tm_sec = strtol(p, (char **)&q, 10);
2728 dt.tm_min = 0;
2729 dt.tm_hour = 0;
2730 }
2731 }
2732
2733 /* Now we have all the fields that we can get */
2734 if (!q) {
2735 if (duration)
2736 return 0;
2737 else
2738 return now * int64_t_C(1000000);
2739 }
2740
2741 if (duration) {
2742 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
2743 } else {
2744 dt.tm_isdst = -1; /* unknown */
2745 if (is_utc) {
2746 t = mktimegm(&dt);
2747 } else {
2748 t = mktime(&dt);
2749 }
2750 }
2751
2752 t *= 1000000;
2753
2754 if (*q == '.') {
2755 int val, n;
2756 q++;
2757 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
2758 if (!isdigit(*q))
2759 break;
2760 val += n * (*q - '0');
2761 }
2762 t += val;
2763 }
2764 return negative ? -t : t;
2765}
2766#endif /* CONFIG_WINCE */
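
/*
 * Examples (sketch; the absolute form is interpreted as local time unless
 * 'Z' is appended):
 *
 * @code
 * int64_t when = parse_date("2006-03-01 12:00:00", 0); // microseconds since 1970
 * int64_t dur  = parse_date("00:01:30.500", 1);        // 90500000 (90.5 seconds)
 * @endcode
 */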
2767
2768/**
2769 * Attempts to find a specific tag in a URL.
2770 *
2771 * Syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is done ('+' becomes a space).
2772 * Returns 1 if found.
2773 */
2774int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
2775{
2776 const char *p;
2777 char tag[128], *q;
2778
2779 p = info;
2780 if (*p == '?')
2781 p++;
2782 for(;;) {
2783 q = tag;
2784 while (*p != '\0' && *p != '=' && *p != '&') {
2785 if ((q - tag) < sizeof(tag) - 1)
2786 *q++ = *p;
2787 p++;
2788 }
2789 *q = '\0';
2790 q = arg;
2791 if (*p == '=') {
2792 p++;
2793 while (*p != '&' && *p != '\0') {
2794 if ((q - arg) < arg_size - 1) {
2795 if (*p == '+')
2796 *q++ = ' ';
2797 else
2798 *q++ = *p;
2799 }
2800 p++;
2801 }
2802 *q = '\0';
2803 }
2804 if (!strcmp(tag, tag1))
2805 return 1;
2806 if (*p != '&')
2807 break;
2808 p++;
2809 }
2810 return 0;
2811}
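
/*
 * Example (sketch):
 *
 * @code
 * char buf[32];
 * if (find_info_tag(buf, sizeof(buf), "codec", "?bitrate=64000&codec=mp2"))
 *     ; // buf now contains "mp2"
 * @endcode
 */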
2812
2813/**
2814 * Returns in 'buf' the path with '%d' replaced by number.
2815 *
2816 * Also handles the '%0nd' format where 'n' is the total number
2817 * of digits and '%%'.
2818 *
2819 * @param buf destination buffer
2820 * @param buf_size destination buffer size
2821 * @param path numbered sequence string
2822 * @param number frame number
2823 * @return 0 if OK, -1 if format error.
2824 */
2825int av_get_frame_filename(char *buf, int buf_size,
2826 const char *path, int number)
2827{
2828 const char *p;
2829 char *q, buf1[20], c;
2830 int nd, len, percentd_found;
2831
2832 q = buf;
2833 p = path;
2834 percentd_found = 0;
2835 for(;;) {
2836 c = *p++;
2837 if (c == '\0')
2838 break;
2839 if (c == '%') {
2840 do {
2841 nd = 0;
2842 while (isdigit(*p)) {
2843 nd = nd * 10 + *p++ - '0';
2844 }
2845 c = *p++;
2846 } while (isdigit(c));
2847
2848 switch(c) {
2849 case '%':
2850 goto addchar;
2851 case 'd':
2852 if (percentd_found)
2853 goto fail;
2854 percentd_found = 1;
2855 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
2856 len = strlen(buf1);
2857 if ((q - buf + len) > buf_size - 1)
2858 goto fail;
2859 memcpy(q, buf1, len);
2860 q += len;
2861 break;
2862 default:
2863 goto fail;
2864 }
2865 } else {
2866 addchar:
2867 if ((q - buf) < buf_size - 1)
2868 *q++ = c;
2869 }
2870 }
2871 if (!percentd_found)
2872 goto fail;
2873 *q = '\0';
2874 return 0;
2875 fail:
2876 *q = '\0';
2877 return -1;
2878}
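
/*
 * Examples (sketch):
 *
 * @code
 * char name[64];
 * av_get_frame_filename(name, sizeof(name), "img%03d.jpeg", 7); // "img007.jpeg"
 * av_get_frame_filename(name, sizeof(name), "frame.jpeg", 7);   // fails: no '%d'
 * @endcode
 */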
2879
2880/**
2881 * Print a nice hex dump of a buffer.
2882 * @param f stream for output
2883 * @param buf buffer
2884 * @param size buffer size
2885 */
2886void av_hex_dump(FILE *f, uint8_t *buf, int size)
2887{
2888 int len, i, j, c;
2889
2890 for(i=0;i<size;i+=16) {
2891 len = size - i;
2892 if (len > 16)
2893 len = 16;
2894 fprintf(f, "%08x ", i);
2895 for(j=0;j<16;j++) {
2896 if (j < len)
2897 fprintf(f, " %02x", buf[i+j]);
2898 else
2899 fprintf(f, " ");
2900 }
2901 fprintf(f, " ");
2902 for(j=0;j<len;j++) {
2903 c = buf[i+j];
2904 if (c < ' ' || c > '~')
2905 c = '.';
2906 fprintf(f, "%c", c);
2907 }
2908 fprintf(f, "\n");
2909 }
2910}
2911
2912/**
2913 * Print on 'f' a nice dump of a packet
2914 * @param f stream for output
2915 * @param pkt packet to dump
2916 * @param dump_payload true if the payload must be displayed too
2917 */
2918 //FIXME needs to know the time_base
2919void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
2920{
2921 fprintf(f, "stream #%d:\n", pkt->stream_index);
2922 fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
2923 fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
2924 /* DTS is _always_ valid after av_read_frame() */
2925 fprintf(f, " dts=");
2926 if (pkt->dts == AV_NOPTS_VALUE)
2927 fprintf(f, "N/A");
2928 else
2929 fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
2930 /* PTS may be not known if B frames are present */
2931 fprintf(f, " pts=");
2932 if (pkt->pts == AV_NOPTS_VALUE)
2933 fprintf(f, "N/A");
2934 else
2935 fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
2936 fprintf(f, "\n");
2937 fprintf(f, " size=%d\n", pkt->size);
2938 if (dump_payload)
2939 av_hex_dump(f, pkt->data, pkt->size);
2940}
2941
2942void url_split(char *proto, int proto_size,
2943 char *authorization, int authorization_size,
2944 char *hostname, int hostname_size,
2945 int *port_ptr,
2946 char *path, int path_size,
2947 const char *url)
2948{
2949 const char *p;
2950 char *q;
2951 int port;
2952
2953 port = -1;
2954
2955 p = url;
2956 q = proto;
2957 while (*p != ':' && *p != '\0') {
2958 if ((q - proto) < proto_size - 1)
2959 *q++ = *p;
2960 p++;
2961 }
2962 if (proto_size > 0)
2963 *q = '\0';
2964 if (authorization_size > 0)
2965 authorization[0] = '\0';
2966 if (*p == '\0') {
2967 if (proto_size > 0)
2968 proto[0] = '\0';
2969 if (hostname_size > 0)
2970 hostname[0] = '\0';
2971 p = url;
2972 } else {
2973 char *at,*slash; // PETR: position of '@' character and '/' character
2974
2975 p++;
2976 if (*p == '/')
2977 p++;
2978 if (*p == '/')
2979 p++;
2980 at = strchr(p,'@'); // PETR: get the position of '@'
2981 slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
2982 if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
2983
2984 q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
2985
2986 while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
2987 if (*p == '@') { // PETR: passed '@'
2988 if (authorization_size > 0)
2989 *q = '\0';
2990 q = hostname;
2991 at = NULL;
2992 } else if (!at) { // PETR: hostname
2993 if ((q - hostname) < hostname_size - 1)
2994 *q++ = *p;
2995 } else {
2996 if ((q - authorization) < authorization_size - 1)
2997 *q++ = *p;
2998 }
2999 p++;
3000 }
3001 if (hostname_size > 0)
3002 *q = '\0';
3003 if (*p == ':') {
3004 p++;
3005 port = strtoul(p, (char **)&p, 10);
3006 }
3007 }
3008 if (port_ptr)
3009 *port_ptr = port;
3010 pstrcpy(path, path_size, p);
3011}
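
/*
 * Example (sketch):
 *
 * @code
 * char proto[8], auth[64], host[64], path[256];
 * int port;
 *
 * url_split(proto, sizeof(proto), auth, sizeof(auth), host, sizeof(host),
 *           &port, path, sizeof(path), "rtsp://user:pass@example.com:554/live.sdp");
 * // proto="rtsp", auth="user:pass", host="example.com", port=554, path="/live.sdp"
 * @endcode
 */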
3012
3013/**
3014 * Set the pts for a given stream.
3015 *
3016 * @param s stream
3017 * @param pts_wrap_bits number of bits effectively used by the pts
3018 * (used for wrap control, 33 is the value for MPEG)
3019 * @param pts_num numerator to convert to seconds (MPEG: 1)
3020 * @param pts_den denominator to convert to seconds (MPEG: 90000)
3021 */
3022void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3023 int pts_num, int pts_den)
3024{
3025 s->pts_wrap_bits = pts_wrap_bits;
3026 s->time_base.num = pts_num;
3027 s->time_base.den = pts_den;
3028}
3029
3030/* fraction handling */
3031
3032/**
3033 * f = val + (num / den) + 0.5.
3034 *
3035 * 'num' is normalized so that 0 <= num < den.
3036 *
3037 * @param f fractional number
3038 * @param val integer value
3039 * @param num must be >= 0
3040 * @param den must be >= 1
3041 */
3042static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
3043{
3044 num += (den >> 1);
3045 if (num >= den) {
3046 val += num / den;
3047 num = num % den;
3048 }
3049 f->val = val;
3050 f->num = num;
3051 f->den = den;
3052}
3053
3054/**
3055 * Set f to (val + 0.5).
3056 */
3057static void av_frac_set(AVFrac *f, int64_t val)
3058{
3059 f->val = val;
3060 f->num = f->den >> 1;
3061}
3062
3063/**
3064 * Fractional addition to f: f = f + (incr / f->den).
3065 *
3066 * @param f fractional number
3067 * @param incr increment, can be positive or negative
3068 */
3069static void av_frac_add(AVFrac *f, int64_t incr)
3070{
3071 int64_t num, den;
3072
3073 num = f->num + incr;
3074 den = f->den;
3075 if (num < 0) {
3076 f->val += num / den;
3077 num = num % den;
3078 if (num < 0) {
3079 num += den;
3080 f->val--;
3081 }
3082 } else if (num >= den) {
3083 f->val += num / den;
3084 num = num % den;
3085 }
3086 f->num = num;
3087}
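
/*
 * Worked example of the fraction handling as used for audio pts generation:
 * with a 1/90000 stream time base and a 44100 Hz stream, av_write_header()
 * calls av_frac_init(&st->pts, 0, 0, 44100). An MP2-style 1152-sample frame
 * then adds 90000*1152 = 103680000 to the numerator, i.e. 2351 + 1/49 ticks,
 * so the integer pts advances by 2351 most of the time and by 2352 once every
 * 49 frames, keeping the accumulated rounding error below one tick.
 */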