source: trunk/libavcodec/vc1.c@ 257

Last change on this file since 257 was 257, checked in by vladest, 18 years ago

Fight with the AV parser (stubborn thing); some AVI+MP3 files don't work.
ffmpeg update
some real media sync fixes

File size: 148.6 KB
Line 
1/*
2 * VC-1 and WMV3 decoder
3 * Copyright (c) 2006 Konstantin Shishkov
4 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */
23
24/**
25 * @file vc1.c
26 * VC-1 and WMV3 decoder
27 *
28 */
29#include "common.h"
30#include "dsputil.h"
31#include "avcodec.h"
32#include "mpegvideo.h"
33#include "vc1data.h"
34#include "vc1acdata.h"
35
36#undef NDEBUG
37#include <assert.h>
38
/* DC and MB-intra VLC tables shared with the MSMPEG4/WMV decoders
 * (presumably defined in msmpeg4.c — TODO confirm). */
extern const uint32_t ff_table0_dc_lum[120][2], ff_table1_dc_lum[120][2];
extern const uint32_t ff_table0_dc_chroma[120][2], ff_table1_dc_chroma[120][2];
extern VLC ff_msmp4_dc_luma_vlc[2], ff_msmp4_dc_chroma_vlc[2];
#define MB_INTRA_VLC_BITS 9
extern VLC ff_msmp4_mb_i_vlc;
extern const uint16_t ff_msmp4_mb_i_table[64][2];
#define DC_VLC_BITS 9
#define AC_VLC_BITS 9
/* NOTE(review): tentative definition that is never initialized and, as far as
 * visible in this file, never referenced — candidate for removal. TODO confirm. */
static const uint16_t table_mb_intra[64][2];
48
49
/** Available Profiles */
//@{
enum Profile {
    PROFILE_SIMPLE,
    PROFILE_MAIN,
    PROFILE_COMPLEX, ///< TODO: WMV9 specific
    PROFILE_ADVANCED
};
//@}

/** Sequence quantizer mode */
//@{
enum QuantMode {
    QUANT_FRAME_IMPLICIT,    ///< Implicitly specified at frame level
    QUANT_FRAME_EXPLICIT,    ///< Explicitly specified at frame level
    QUANT_NON_UNIFORM,       ///< Non-uniform quant used for all frames
    QUANT_UNIFORM            ///< Uniform quant used for all frames
};
//@}

/** Where quant can be changed */
//@{
enum DQProfile {
    DQPROFILE_FOUR_EDGES,
    DQPROFILE_DOUBLE_EDGES,
    DQPROFILE_SINGLE_EDGE,
    DQPROFILE_ALL_MBS
};
//@}

/** @name Which single edge is quantized with ALTPQUANT
 *  (used when dqprofile == DQPROFILE_SINGLE_EDGE)
 */
//@{
enum DQSingleEdge {
    DQSINGLE_BEDGE_LEFT,
    DQSINGLE_BEDGE_TOP,
    DQSINGLE_BEDGE_RIGHT,
    DQSINGLE_BEDGE_BOTTOM
};
//@}

/** Which pair of edges is quantized with ALTPQUANT */
//@{
enum DQDoubleEdge {
    DQDOUBLE_BEDGE_TOPLEFT,
    DQDOUBLE_BEDGE_TOPRIGHT,
    DQDOUBLE_BEDGE_BOTTOMRIGHT,
    DQDOUBLE_BEDGE_BOTTOMLEFT
};
//@}

/** MV modes for P frames */
//@{
enum MVModes {
    MV_PMODE_1MV_HPEL_BILIN,
    MV_PMODE_1MV,
    MV_PMODE_1MV_HPEL,
    MV_PMODE_MIXED_MV,
    MV_PMODE_INTENSITY_COMP
};
//@}

/** @name MV types for B frames */
//@{
enum BMVTypes {
    BMV_TYPE_BACKWARD,
    BMV_TYPE_FORWARD,
    BMV_TYPE_INTERPOLATED
};
//@}

/** @name Block types for P/B frames */
//@{
enum TransformTypes {
    TT_8X8,
    TT_8X4_BOTTOM,
    TT_8X4_TOP,
    TT_8X4,          // Both halves
    TT_4X8_RIGHT,
    TT_4X8_LEFT,
    TT_4X8,          // Both halves
    TT_4X4
};
//@}
134
/** Table for conversion between TTBLK and TTMB
 *  (indexed by pqindex-derived table set, then by the decoded TTBLK VLC) */
static const int ttblk_to_tt[3][8] = {
  { TT_8X4, TT_4X8, TT_8X8, TT_4X4, TT_8X4_TOP, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT },
  { TT_8X8, TT_4X8_RIGHT, TT_4X8_LEFT, TT_4X4, TT_8X4, TT_4X8, TT_8X4_BOTTOM, TT_8X4_TOP },
  { TT_8X8, TT_4X8, TT_4X4, TT_8X4_BOTTOM, TT_4X8_RIGHT, TT_4X8_LEFT, TT_8X4, TT_8X4_TOP }
};

/* Maps the 2-bit TTFRM frame-level transform type to a TransformTypes value */
static const int ttfrm_to_tt[4] = { TT_8X8, TT_8X4, TT_4X8, TT_4X4 };

/** MV P mode - the 5th element is only used for mode 1 */
static const uint8_t mv_pmode_table[2][5] = {
  { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_MIXED_MV },
  { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_INTENSITY_COMP, MV_PMODE_1MV_HPEL_BILIN }
};
static const uint8_t mv_pmode_table2[2][4] = {
  { MV_PMODE_1MV_HPEL_BILIN, MV_PMODE_1MV, MV_PMODE_1MV_HPEL, MV_PMODE_MIXED_MV },
  { MV_PMODE_1MV, MV_PMODE_MIXED_MV, MV_PMODE_1MV_HPEL, MV_PMODE_1MV_HPEL_BILIN }
};

/** One more frame type */
#define BI_TYPE 7

/* Frame-rate numerators/denominators signalled in the sequence header */
static const int fps_nr[5] = { 24, 25, 30, 50, 60 },
  fps_dr[2] = { 1000, 1001 };
/* PQINDEX -> PQUANT mapping, per quantizer signalling mode */
static const uint8_t pquant_table[3][32] = {
  {  /* Implicit quantizer */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  6,  7,  8,  9, 10, 11, 12,
    13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 29, 31
  },
  {  /* Explicit quantizer, pquantizer uniform */
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
    16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
  },
  {  /* Explicit quantizer, pquantizer non-uniform */
     0,  1,  1,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
    14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31
  }
};
173
/** @name VC-1 VLC tables and defines
 * All tables below are filled exactly once by vc1_init_common().
 * @todo TODO move this into the context
 */
//@{
#define VC1_BFRACTION_VLC_BITS 7
static VLC vc1_bfraction_vlc;
#define VC1_IMODE_VLC_BITS 4
static VLC vc1_imode_vlc;
#define VC1_NORM2_VLC_BITS 3
static VLC vc1_norm2_vlc;
#define VC1_NORM6_VLC_BITS 9
static VLC vc1_norm6_vlc;
/* Could be optimized, one table only needs 8 bits */
#define VC1_TTMB_VLC_BITS 9 //12
static VLC vc1_ttmb_vlc[3];
#define VC1_MV_DIFF_VLC_BITS 9 //15
static VLC vc1_mv_diff_vlc[4];
#define VC1_CBPCY_P_VLC_BITS 9 //14
static VLC vc1_cbpcy_p_vlc[4];
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS 6
static VLC vc1_4mv_block_pattern_vlc[4];
#define VC1_TTBLK_VLC_BITS 5
static VLC vc1_ttblk_vlc[3];
#define VC1_SUBBLKPAT_VLC_BITS 6
static VLC vc1_subblkpat_vlc[3];

static VLC vc1_ac_coeff_table[8];
//@}

/* Coding-set indices used to pick AC coefficient tables (see 11.8) */
enum CodingSet {
    CS_HIGH_MOT_INTRA = 0,
    CS_HIGH_MOT_INTER,
    CS_LOW_MOT_INTRA,
    CS_LOW_MOT_INTER,
    CS_MID_RATE_INTRA,
    CS_MID_RATE_INTER,
    CS_HIGH_RATE_INTRA,
    CS_HIGH_RATE_INTER
};

/** @name Overlap conditions for Advanced Profile */
//@{
enum COTypes {
    CONDOVER_NONE = 0,
    CONDOVER_ALL,
    CONDOVER_SELECT
};
//@}
223
/** The VC1 Context
 * @fixme Change size wherever another size is more efficient
 * Many members are only used for Advanced Profile
 */
typedef struct VC1Context{
    MpegEncContext s;   ///< base MPEG-style decoder context (must be first)

    int bits;           ///< NOTE(review): meaning not evident from this file — confirm against callers

    /** Simple/Main Profile sequence header */
    //@{
    int res_sm;           ///< reserved, 2b
    int res_x8;           ///< reserved
    int multires;         ///< frame-level RESPIC syntax element present
    int res_fasttx;       ///< reserved, always 1
    int res_transtab;     ///< reserved, always 0
    int rangered;         ///< RANGEREDFRM (range reduction) syntax element present
                          ///< at frame level
    int res_rtm_flag;     ///< reserved, set to 1
    int reserved;         ///< reserved
    //@}

    /** Advanced Profile */
    //@{
    int level;            ///< 3bits, for Advanced/Simple Profile, provided by TS layer
    int chromaformat;     ///< 2bits, 2=4:2:0, only defined
    int postprocflag;     ///< Per-frame processing suggestion flag present
    int broadcast;        ///< TFF/RFF present
    int interlace;        ///< Progressive/interlaced (RPTFRM syntax element)
    int tfcntrflag;       ///< TFCNTR present
    int panscanflag;      ///< NUMPANSCANWIN, TOPLEFT{X,Y}, BOTRIGHT{X,Y} present
    int extended_dmv;     ///< Additional extended dmv range at P/B frame-level
    int color_prim;       ///< 8bits, chroma coordinates of the color primaries
    int transfer_char;    ///< 8bits, Opto-electronic transfer characteristics
    int matrix_coef;      ///< 8bits, Color primaries->YCbCr transform matrix
    int hrd_param_flag;   ///< Presence of Hypothetical Reference
                          ///< Decoder parameters
    int psf;              ///< Progressive Segmented Frame
    //@}

    /** Sequence header data for all Profiles
     * TODO: choose between ints, uint8_ts and monobit flags
     */
    //@{
    int profile;          ///< 2bits, Profile
    int frmrtq_postproc;  ///< 3bits,
    int bitrtq_postproc;  ///< 5bits, quantized framerate-based postprocessing strength
    int fastuvmc;         ///< Rounding of qpel vector to hpel ? (not in Simple)
    int extended_mv;      ///< Ext MV in P/B (not in Simple)
    int dquant;           ///< How qscale varies with MBs, 2bits (not in Simple)
    int vstransform;      ///< variable-size [48]x[48] transform type + info
    int overlap;          ///< overlapped transforms in use
    int quantizer_mode;   ///< 2bits, quantizer mode used for sequence, see QUANT_*
    int finterpflag;      ///< INTERPFRM present
    //@}

    /** Frame decoding info for all profiles */
    //@{
    uint8_t mv_mode;      ///< MV coding mode
    uint8_t mv_mode2;     ///< Secondary MV coding mode (B frames)
    int k_x;              ///< Number of bits for MVs (depends on MV range)
    int k_y;              ///< Number of bits for MVs (depends on MV range)
    int range_x, range_y; ///< MV range
    uint8_t pq, altpq;    ///< Current/alternate frame quantizer scale
    /** pquant parameters */
    //@{
    uint8_t dquantfrm;
    uint8_t dqprofile;
    uint8_t dqsbedge;
    uint8_t dqbilevel;
    //@}
    /** AC coding set indexes
     * @see 8.1.1.10, p(1)10
     */
    //@{
    int c_ac_table_index; ///< Chroma index from ACFRM element
    int y_ac_table_index; ///< Luma index from AC2FRM element
    //@}
    int ttfrm;            ///< Transform type info present at frame level
    uint8_t ttmbf;        ///< Transform type flag
    uint8_t ttblk4x4;     ///< Value of ttblk which indicates a 4x4 transform
    int codingset;        ///< index of current table set from 11.8 to use for luma block decoding
    int codingset2;       ///< index of current table set from 11.8 to use for chroma block decoding
    int pqindex;          ///< raw pqindex used in coding set selection
    int a_avail, c_avail; ///< availability of above/left neighbours for prediction
    uint8_t *mb_type_base, *mb_type[3];


    /** Luma compensation parameters */
    //@{
    uint8_t lumscale;
    uint8_t lumshift;
    //@}
    int16_t bfraction;    ///< Relative position % anchors=> how to scale MVs
    uint8_t halfpq;       ///< Uniform quant over image and qp+.5
    uint8_t respic;       ///< Frame-level flag for resized images
    int buffer_fullness;  ///< HRD info
    /** Ranges:
     * -# 0 -> [-64n 63.f] x [-32, 31.f]
     * -# 1 -> [-128, 127.f] x [-64, 63.f]
     * -# 2 -> [-512, 511.f] x [-128, 127.f]
     * -# 3 -> [-1024, 1023.f] x [-256, 255.f]
     */
    uint8_t mvrange;
    uint8_t pquantizer;   ///< Uniform (over sequence) quantizer in use
    VLC *cbpcy_vlc;       ///< CBPCY VLC table
    int tt_index;         ///< Index for Transform Type tables
    uint8_t* mv_type_mb_plane; ///< bitplane for mv_type == (4MV)
    uint8_t* direct_mb_plane;  ///< bitplane for "direct" MBs
    int mv_type_is_raw;   ///< mv type mb plane is not coded
    int dmb_is_raw;       ///< direct mb plane is raw
    int skip_is_raw;      ///< skip mb plane is not coded
    uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
    int use_ic;           ///< use intensity compensation in B-frames
    int rnd;              ///< rounding control

    /** Frame decoding info for S/M profiles only */
    //@{
    uint8_t rangeredfrm;  ///< out_sample = CLIP((in_sample-128)*2+128)
    uint8_t interpfrm;
    //@}

    /** Frame decoding info for Advanced profile */
    //@{
    uint8_t fcm;          ///< 0->Progressive, 2->Frame-Interlace, 3->Field-Interlace
    uint8_t numpanscanwin;
    uint8_t tfcntr;
    uint8_t rptfrm, tff, rff;
    uint16_t topleftx;
    uint16_t toplefty;
    uint16_t bottomrightx;
    uint16_t bottomrighty;
    uint8_t uvsamp;
    uint8_t postproc;
    int hrd_num_leaky_buckets;
    uint8_t bit_rate_exponent;
    uint8_t buffer_size_exponent;
    uint8_t* acpred_plane;      ///< AC prediction flags bitplane
    int acpred_is_raw;
    uint8_t* over_flags_plane;  ///< Overflags bitplane
    int overflg_is_raw;
    uint8_t condover;
    uint16_t *hrd_rate, *hrd_buffer;
    uint8_t *hrd_fullness;
    uint8_t range_mapy_flag;
    uint8_t range_mapuv_flag;
    uint8_t range_mapy;
    uint8_t range_mapuv;
    //@}

    int p_frame_skipped;  ///< last P frame was a skipped (repeat) frame
    int bi_type;          ///< current B frame is actually a BI frame
} VC1Context;
377
378/**
379 * Get unary code of limited length
380 * @fixme FIXME Slow and ugly
381 * @param gb GetBitContext
382 * @param[in] stop The bitstop value (unary code of 1's or 0's)
383 * @param[in] len Maximum length
384 * @return Unary length/index
385 */
386static int get_prefix(GetBitContext *gb, int stop, int len)
387{
388#if 1
389 int i;
390
391 for(i = 0; i < len && get_bits1(gb) != stop; i++);
392 return i;
393/* int i = 0, tmp = !stop;
394
395 while (i != len && tmp != stop)
396 {
397 tmp = get_bits(gb, 1);
398 i++;
399 }
400 if (i == len && tmp != stop) return len+1;
401 return i;*/
402#else
403 unsigned int buf;
404 int log;
405
406 OPEN_READER(re, gb);
407 UPDATE_CACHE(re, gb);
408 buf=GET_CACHE(re, gb); //Still not sure
409 if (stop) buf = ~buf;
410
411 log= av_log2(-buf); //FIXME: -?
412 if (log < limit){
413 LAST_SKIP_BITS(re, gb, log+1);
414 CLOSE_READER(re, gb);
415 return log;
416 }
417
418 LAST_SKIP_BITS(re, gb, limit);
419 CLOSE_READER(re, gb);
420 return limit;
421#endif
422}
423
424static inline int decode210(GetBitContext *gb){
425 int n;
426 n = get_bits1(gb);
427 if (n == 1)
428 return 0;
429 else
430 return 2 - get_bits1(gb);
431}
432
/**
 * Init VC-1 specific tables and VC1Context members
 * @param v The VC1Context to initialize
 * @return Status (always 0)
 */
static int vc1_init_common(VC1Context *v)
{
    /* One-shot latch: the static VLC tables are built only on the first call.
     * NOTE(review): a plain static flag is not thread-safe if two decoder
     * instances are opened concurrently — confirm the caller serializes init. */
    static int done = 0;
    int i = 0;

    v->hrd_rate = v->hrd_buffer = NULL;

    /* VLC tables */
    if(!done)
    {
        done = 1;
        init_vlc(&vc1_bfraction_vlc, VC1_BFRACTION_VLC_BITS, 23,
                 vc1_bfraction_bits, 1, 1,
                 vc1_bfraction_codes, 1, 1, 1);
        init_vlc(&vc1_norm2_vlc, VC1_NORM2_VLC_BITS, 4,
                 vc1_norm2_bits, 1, 1,
                 vc1_norm2_codes, 1, 1, 1);
        init_vlc(&vc1_norm6_vlc, VC1_NORM6_VLC_BITS, 64,
                 vc1_norm6_bits, 1, 1,
                 vc1_norm6_codes, 2, 2, 1);
        init_vlc(&vc1_imode_vlc, VC1_IMODE_VLC_BITS, 7,
                 vc1_imode_bits, 1, 1,
                 vc1_imode_codes, 1, 1, 1);
        /* Three table sets each for TTMB, TTBLK and SUBBLKPAT
         * (selected later via tt_index, which depends on PQUANT). */
        for (i=0; i<3; i++)
        {
            init_vlc(&vc1_ttmb_vlc[i], VC1_TTMB_VLC_BITS, 16,
                     vc1_ttmb_bits[i], 1, 1,
                     vc1_ttmb_codes[i], 2, 2, 1);
            init_vlc(&vc1_ttblk_vlc[i], VC1_TTBLK_VLC_BITS, 8,
                     vc1_ttblk_bits[i], 1, 1,
                     vc1_ttblk_codes[i], 1, 1, 1);
            init_vlc(&vc1_subblkpat_vlc[i], VC1_SUBBLKPAT_VLC_BITS, 15,
                     vc1_subblkpat_bits[i], 1, 1,
                     vc1_subblkpat_codes[i], 1, 1, 1);
        }
        /* Four table sets each for 4MV block pattern, CBPCY and MV diff. */
        for(i=0; i<4; i++)
        {
            init_vlc(&vc1_4mv_block_pattern_vlc[i], VC1_4MV_BLOCK_PATTERN_VLC_BITS, 16,
                     vc1_4mv_block_pattern_bits[i], 1, 1,
                     vc1_4mv_block_pattern_codes[i], 1, 1, 1);
            init_vlc(&vc1_cbpcy_p_vlc[i], VC1_CBPCY_P_VLC_BITS, 64,
                     vc1_cbpcy_p_bits[i], 1, 1,
                     vc1_cbpcy_p_codes[i], 2, 2, 1);
            init_vlc(&vc1_mv_diff_vlc[i], VC1_MV_DIFF_VLC_BITS, 73,
                     vc1_mv_diff_bits[i], 1, 1,
                     vc1_mv_diff_codes[i], 2, 2, 1);
        }
        /* Eight AC coefficient coding sets (see enum CodingSet). */
        for(i=0; i<8; i++)
            init_vlc(&vc1_ac_coeff_table[i], AC_VLC_BITS, vc1_ac_sizes[i],
                     &vc1_ac_tables[i][0][1], 8, 4,
                     &vc1_ac_tables[i][0][0], 8, 4, 1);
        init_vlc(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64,
                 &ff_msmp4_mb_i_table[0][1], 4, 2,
                 &ff_msmp4_mb_i_table[0][0], 4, 2, 1);
    }

    /* Other defaults */
    v->pq = -1;
    v->mvrange = 0; /* 7.1.1.18, p80 */

    return 0;
}
500
501/***********************************************************************/
502/**
503 * @defgroup bitplane VC9 Bitplane decoding
504 * @see 8.7, p56
505 * @{
506 */
507
508/** @addtogroup bitplane
509 * Imode types
510 * @{
511 */
/* Bitplane coding modes (IMODE syntax element, see 8.7) */
enum Imode {
    IMODE_RAW,      ///< bits are read per-MB in the MB layer, not here
    IMODE_NORM2,    ///< pairs of bits via the Norm-2 VLC
    IMODE_DIFF2,    ///< Norm-2 followed by the diff operator
    IMODE_NORM6,    ///< groups of six bits via the Norm-6 VLC
    IMODE_DIFF6,    ///< Norm-6 followed by the diff operator
    IMODE_ROWSKIP,  ///< per-row skip flags
    IMODE_COLSKIP   ///< per-column skip flags
};
521/** @} */ //imode defines
522
523/** Decode rows by checking if they are skipped
524 * @param plane Buffer to store decoded bits
525 * @param[in] width Width of this buffer
526 * @param[in] height Height of this buffer
527 * @param[in] stride of this buffer
528 */
529static void decode_rowskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
530 int x, y;
531
532 for (y=0; y<height; y++){
533 if (!get_bits(gb, 1)) //rowskip
534 memset(plane, 0, width);
535 else
536 for (x=0; x<width; x++)
537 plane[x] = get_bits(gb, 1);
538 plane += stride;
539 }
540}
541
542/** Decode columns by checking if they are skipped
543 * @param plane Buffer to store decoded bits
544 * @param[in] width Width of this buffer
545 * @param[in] height Height of this buffer
546 * @param[in] stride of this buffer
547 * @fixme FIXME: Optimize
548 */
549static void decode_colskip(uint8_t* plane, int width, int height, int stride, GetBitContext *gb){
550 int x, y;
551
552 for (x=0; x<width; x++){
553 if (!get_bits(gb, 1)) //colskip
554 for (y=0; y<height; y++)
555 plane[y*stride] = 0;
556 else
557 for (y=0; y<height; y++)
558 plane[y*stride] = get_bits(gb, 1);
559 plane ++;
560 }
561}
562
/** Decode a bitplane's bits (see 8.7)
 * Reads INVERT and IMODE, then fills the mb_width x mb_height plane `data`
 * according to the coding mode; DIFF modes are post-processed with the diff
 * operator, and INVERT flips the whole plane for non-diff modes.
 * @param data Bitplane where to store the decoded bits (one byte per MB)
 * @param[out] raw_flag Set to 1 for IMODE_RAW (bits are read in the MB layer)
 * @param v VC-1 context for bit reading and logging
 * @return (imode << 1) + invert on success, -1 on an invalid NORM-6 code
 * @fixme FIXME: Optimize
 */
static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
{
    GetBitContext *gb = &v->s.gb;

    int imode, x, y, code, offset;
    uint8_t invert, *planep = data;
    int width, height, stride;

    width = v->s.mb_width;
    height = v->s.mb_height;
    stride = v->s.mb_stride;
    invert = get_bits(gb, 1);
    imode = get_vlc2(gb, vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);

    *raw_flag = 0;
    switch (imode)
    {
    case IMODE_RAW:
        //Data is actually read in the MB layer (same for all tests == "raw")
        *raw_flag = 1; //invert ignored
        return invert;
    case IMODE_DIFF2:
    case IMODE_NORM2:
        /* Odd number of MBs: one leading bit is coded on its own. */
        if ((height * width) & 1)
        {
            *planep++ = get_bits(gb, 1);
            offset = 1;
        }
        else offset = 0;
        // decode bitplane as one long line; `offset` tracks the column so the
        // stride gap can be skipped at each row end
        for (y = offset; y < height * width; y += 2) {
            code = get_vlc2(gb, vc1_norm2_vlc.table, VC1_NORM2_VLC_BITS, 1);
            *planep++ = code & 1;
            offset++;
            if(offset == width) {
                offset = 0;
                planep += stride - width;
            }
            *planep++ = code >> 1;
            offset++;
            if(offset == width) {
                offset = 0;
                planep += stride - width;
            }
        }
        break;
    case IMODE_DIFF6:
    case IMODE_NORM6:
        /* Each NORM-6 code carries six plane bits, grouped either as 2x3 or
         * 3x2 tiles depending on which dimension divides evenly; leftover
         * rows/columns are coded with row/col-skip. */
        if(!(height % 3) && (width % 3)) { // use 2x3 decoding
            for(y = 0; y < height; y+= 3) {
                for(x = width & 1; x < width; x += 2) {
                    code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                    if(code < 0){
                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                        return -1;
                    }
                    planep[x + 0] = (code >> 0) & 1;
                    planep[x + 1] = (code >> 1) & 1;
                    planep[x + 0 + stride] = (code >> 2) & 1;
                    planep[x + 1 + stride] = (code >> 3) & 1;
                    planep[x + 0 + stride * 2] = (code >> 4) & 1;
                    planep[x + 1 + stride * 2] = (code >> 5) & 1;
                }
                planep += stride * 3;
            }
            if(width & 1) decode_colskip(data, 1, height, stride, &v->s.gb);
        } else { // 3x2
            planep += (height & 1) * stride;
            for(y = height & 1; y < height; y += 2) {
                for(x = width % 3; x < width; x += 3) {
                    code = get_vlc2(gb, vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                    if(code < 0){
                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                        return -1;
                    }
                    planep[x + 0] = (code >> 0) & 1;
                    planep[x + 1] = (code >> 1) & 1;
                    planep[x + 2] = (code >> 2) & 1;
                    planep[x + 0 + stride] = (code >> 3) & 1;
                    planep[x + 1 + stride] = (code >> 4) & 1;
                    planep[x + 2 + stride] = (code >> 5) & 1;
                }
                planep += stride * 2;
            }
            x = width % 3;
            if(x) decode_colskip(data  ,             x, height    , stride, &v->s.gb);
            if(height & 1) decode_rowskip(data+x, width - x, 1, stride, &v->s.gb);
        }
        break;
    case IMODE_ROWSKIP:
        decode_rowskip(data, width, height, stride, &v->s.gb);
        break;
    case IMODE_COLSKIP:
        decode_colskip(data, width, height, stride, &v->s.gb);
        break;
    default: break;
    }

    /* Applying diff operator: each bit is XORed with its left neighbour
     * (top neighbour at row starts); when left and top disagree, INVERT
     * is used instead. */
    if (imode == IMODE_DIFF2 || imode == IMODE_DIFF6)
    {
        planep = data;
        planep[0] ^= invert;
        for (x=1; x<width; x++)
            planep[x] ^= planep[x-1];
        for (y=1; y<height; y++)
        {
            planep += stride;
            planep[0] ^= planep[-stride];
            for (x=1; x<width; x++)
            {
                if (planep[x-1] != planep[x-stride]) planep[x] ^= invert;
                else planep[x] ^= planep[x-1];
            }
        }
    }
    else if (invert)
    {
        planep = data;
        for (x=0; x<stride*height; x++) planep[x] = !planep[x]; //FIXME stride
    }
    return (imode<<1) + invert;
}
692
693/** @} */ //Bitplane group
694
695/***********************************************************************/
696/** VOP Dquant decoding
697 * @param v VC-1 Context
698 */
699static int vop_dquant_decoding(VC1Context *v)
700{
701 GetBitContext *gb = &v->s.gb;
702 int pqdiff;
703
704 //variable size
705 if (v->dquant == 2)
706 {
707 pqdiff = get_bits(gb, 3);
708 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
709 else v->altpq = v->pq + pqdiff + 1;
710 }
711 else
712 {
713 v->dquantfrm = get_bits(gb, 1);
714 if ( v->dquantfrm )
715 {
716 v->dqprofile = get_bits(gb, 2);
717 switch (v->dqprofile)
718 {
719 case DQPROFILE_SINGLE_EDGE:
720 case DQPROFILE_DOUBLE_EDGES:
721 v->dqsbedge = get_bits(gb, 2);
722 break;
723 case DQPROFILE_ALL_MBS:
724 v->dqbilevel = get_bits(gb, 1);
725 default: break; //Forbidden ?
726 }
727 if (v->dqbilevel || v->dqprofile != DQPROFILE_ALL_MBS)
728 {
729 pqdiff = get_bits(gb, 3);
730 if (pqdiff == 7) v->altpq = get_bits(gb, 5);
731 else v->altpq = v->pq + pqdiff + 1;
732 }
733 }
734 }
735 return 0;
736}
737
738/** Put block onto picture
739 */
740static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
741{
742 uint8_t *Y;
743 int ys, us, vs;
744 DSPContext *dsp = &v->s.dsp;
745
746 if(v->rangeredfrm) {
747 int i, j, k;
748 for(k = 0; k < 6; k++)
749 for(j = 0; j < 8; j++)
750 for(i = 0; i < 8; i++)
751 block[k][i + j*8] = ((block[k][i + j*8] - 128) << 1) + 128;
752
753 }
754 ys = v->s.current_picture.linesize[0];
755 us = v->s.current_picture.linesize[1];
756 vs = v->s.current_picture.linesize[2];
757 Y = v->s.dest[0];
758
759 dsp->put_pixels_clamped(block[0], Y, ys);
760 dsp->put_pixels_clamped(block[1], Y + 8, ys);
761 Y += ys * 8;
762 dsp->put_pixels_clamped(block[2], Y, ys);
763 dsp->put_pixels_clamped(block[3], Y + 8, ys);
764
765 if(!(v->s.flags & CODEC_FLAG_GRAY)) {
766 dsp->put_pixels_clamped(block[4], v->s.dest[1], us);
767 dsp->put_pixels_clamped(block[5], v->s.dest[2], vs);
768 }
769}
770
/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 * Performs luma (quarter-pel or half-pel) and chroma (qpel bilinear) MC for a
 * whole 16x16 macroblock using a single motion vector.
 * @param v VC-1 context
 * @param dir 0 = forward prediction (last_picture), 1 = backward (next_picture)
 */
static void vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

    /* No reference frame yet — nothing to predict from. */
    if(!v->s.last_picture.data[0])return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B frames
    if(s->pict_type == P_TYPE) {
        s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
        s->current_picture.motion_val[1][s->block_index[0]][1] = my;
    }
    /* Derive the chroma MV from the luma MV (qpel luma -> qpel chroma at
     * half resolution; the +1 corrects 3/4-pel positions). */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if(!dir) {
        srcY = s->last_picture.data[0];
        srcU = s->last_picture.data[1];
        srcV = s->last_picture.data[2];
    } else {
        srcY = s->next_picture.data[0];
        srcU = s->next_picture.data[1];
        srcV = s->next_picture.data[2];
    }

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    src_x   = clip(  src_x, -16, s->mb_width  * 16);
    src_y   = clip(  src_y, -16, s->mb_height * 16);
    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* for grayscale we should not try to read from unknown area */
    if(s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* Use the edge-emulation buffer when the source area crosses the picture
     * border, or when the source samples must be rewritten first (range
     * reduction / intensity compensation). */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel*3
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 16 - s->mspel*3){
        uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];
                    src2[i] = v->lutuv[src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* Undo the filter-margin offset applied before edge emulation. */
        srcY += s->mspel * (1 + s->linesize);
    }

    /* FASTUVMC: round the chroma vector towards zero to half-pel. */
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }

    if(s->mspel) {
        /* Quarter-pel MC on the four 8x8 luma quadrants. */
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0]    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize    , srcY    , s->linesize, v->rnd);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);

        if(!v->rnd)
            dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    if(!v->rnd){
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }else{
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
915
/** Do motion compensation for 4-MV macroblock - luminance block
 * Predicts one of the four 8x8 luma blocks of a 4MV macroblock from the
 * previous picture, with its own motion vector.
 * @param v VC-1 context
 * @param n Block index 0..3 (bit0 = right half, bit1 = bottom half)
 */
static void vc1_mc_4mv_luma(VC1Context *v, int n)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;

    /* No reference frame yet — nothing to predict from. */
    if(!v->s.last_picture.data[0])return;
    mx = s->mv[0][n][0];
    my = s->mv[0][n][1];
    srcY = s->last_picture.data[0];

    /* Destination offset of this 8x8 block inside the 16x16 macroblock. */
    off = s->linesize * 4 * (n&2) + (n&1) * 8;

    src_x = s->mb_x * 16 + (n&1) * 8 + (mx >> 2);
    src_y = s->mb_y * 16 + (n&2) * 4 + (my >> 2);

    src_x   = clip(  src_x, -16, s->mb_width  * 16);
    src_y   = clip(  src_y, -16, s->mb_height * 16);

    srcY += src_y * s->linesize + src_x;

    /* Use the edge-emulation buffer when the source area crosses the picture
     * border, or when the source samples must be rewritten first (range
     * reduction / intensity compensation). */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
       || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src;

            src = srcY;
            for(j = 0; j < 9 + s->mspel*2; j++) {
                for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
                src += s->linesize;
            }
        }
        /* Undo the filter-margin offset applied before edge emulation. */
        srcY += s->mspel * (1 + s->linesize);
    }

    if(s->mspel) {
        /* Quarter-pel MC. */
        dxy = ((my & 3) << 2) | (mx & 3);
        dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if(!v->rnd)
            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}
984
/* Median of four values: the truncating average of the two middle values,
 * computed as max(pairwise minima) + min(pairwise maxima). This sums the
 * same middle pair as a branch-per-ordering formulation, so the integer
 * division result is identical for negative inputs too. */
static inline int median4(int a, int b, int c, int d)
{
    int min_ab = a < b ? a : b, max_ab = a < b ? b : a;
    int min_cd = c < d ? c : d, max_cd = c < d ? d : c;
    int lo = min_ab > min_cd ? min_ab : min_cd;
    int hi = max_ab < max_cd ? max_ab : max_cd;
    return (lo + hi) / 2;
}
995
996
/** Do motion compensation for 4-MV macroblock - both chroma blocks
 *
 * Derives a single chroma motion vector from the four luma MVs
 * (8.3.5.4.4-style derivation): median of 4 when all blocks are inter,
 * median of 3 when one block is intra, average of 2 when two are intra,
 * and no chroma MC at all when 3 or 4 blocks are intra.  Then performs
 * quarter-pel bilinear chroma MC, with edge emulation plus optional
 * range-reduction / intensity-compensation scaling of the source.
 */
static void vc1_mc_4mv_chroma(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcU, *srcV;
    int uvdxy, uvmx, uvmy, uvsrc_x, uvsrc_y;
    int i, idx, tx = 0, ty = 0;
    int mvx[4], mvy[4], intra[4];
    /* popcount table: count[idx] = number of intra blocks flagged in idx */
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};

    if(!v->s.last_picture.data[0])return;
    if(s->flags & CODEC_FLAG_GRAY) return; /* no chroma planes to fill */

    for(i = 0; i < 4; i++) {
        mvx[i] = s->mv[0][i][0];
        mvy[i] = s->mv[0][i][1];
        intra[i] = v->mb_type[0][s->block_index[i]];
    }

    /* calculate chroma MV vector from four luma MVs */
    idx = (intra[3] << 3) | (intra[2] << 2) | (intra[1] << 1) | intra[0];
    if(!idx) { // all blocks are inter
        tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
        ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
    } else if(count[idx] == 1) { // 3 inter blocks
        /* idx has exactly one bit set: exclude that intra block from the median */
        switch(idx) {
        case 0x1:
            tx = mid_pred(mvx[1], mvx[2], mvx[3]);
            ty = mid_pred(mvy[1], mvy[2], mvy[3]);
            break;
        case 0x2:
            tx = mid_pred(mvx[0], mvx[2], mvx[3]);
            ty = mid_pred(mvy[0], mvy[2], mvy[3]);
            break;
        case 0x4:
            tx = mid_pred(mvx[0], mvx[1], mvx[3]);
            ty = mid_pred(mvy[0], mvy[1], mvy[3]);
            break;
        case 0x8:
            tx = mid_pred(mvx[0], mvx[1], mvx[2]);
            ty = mid_pred(mvy[0], mvy[1], mvy[2]);
            break;
        }
    } else if(count[idx] == 2) {
        /* average the two inter blocks; t1/t2 locate the two clear bits */
        int t1 = 0, t2 = 0;
        for(i=0; i<3;i++) if(!intra[i]) {t1 = i; break;}
        for(i= t1+1; i<4; i++)if(!intra[i]) {t2 = i; break;}
        tx = (mvx[t1] + mvx[t2]) / 2;
        ty = (mvy[t1] + mvy[t2]) / 2;
    } else
        return; //no need to do MC for inter blocks

    s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
    /* halve luma MV to chroma resolution, rounding 3/4-pel positions up */
    uvmx = (tx + ((tx&3) == 3)) >> 1;
    uvmy = (ty + ((ty&3) == 3)) >> 1;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
    uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
    srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
    /* take the slow path whenever the source must be copied/scaled:
     * near a picture edge, or when range reduction / intensity
     * compensation must rewrite the source samples */
    if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
       || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
       || (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
        ff_emulated_edge_mc(s->edge_emu_buffer     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = s->edge_emu_buffer;
        srcV = s->edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            int i, j;
            uint8_t *src, *src2;

            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = v->lutuv[src[i]];      /* lutuv filled by frame header parsing */
                    src2[i] = v->lutuv[src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
    }

    /* FASTUVMC: round chroma MVs toward zero to half-pel positions */
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }

    /* Chroma MC always uses qpel bilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    if(!v->rnd){
        dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }else{
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
1122
1123static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
1124
1125/**
1126 * Decode Simple/Main Profiles sequence header
1127 * @see Figure 7-8, p16-17
1128 * @param avctx Codec context
1129 * @param gb GetBit context initialized from Codec context extra_data
1130 * @return Status
1131 */
1132static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
1133{
1134 VC1Context *v = avctx->priv_data;
1135
1136 av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
1137 v->profile = get_bits(gb, 2);
1138 if (v->profile == 2)
1139 {
1140 av_log(avctx, AV_LOG_ERROR, "Profile value 2 is forbidden (and WMV3 Complex Profile is unsupported)\n");
1141 return -1;
1142 }
1143
1144 if (v->profile == PROFILE_ADVANCED)
1145 {
1146 return decode_sequence_header_adv(v, gb);
1147 }
1148 else
1149 {
1150 v->res_sm = get_bits(gb, 2); //reserved
1151 if (v->res_sm)
1152 {
1153 av_log(avctx, AV_LOG_ERROR,
1154 "Reserved RES_SM=%i is forbidden\n", v->res_sm);
1155 return -1;
1156 }
1157 }
1158
1159 // (fps-2)/4 (->30)
1160 v->frmrtq_postproc = get_bits(gb, 3); //common
1161 // (bitrate-32kbps)/64kbps
1162 v->bitrtq_postproc = get_bits(gb, 5); //common
1163 v->s.loop_filter = get_bits(gb, 1); //common
1164 if(v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE)
1165 {
1166 av_log(avctx, AV_LOG_ERROR,
1167 "LOOPFILTER shell not be enabled in simple profile\n");
1168 }
1169
1170 v->res_x8 = get_bits(gb, 1); //reserved
1171 if (v->res_x8)
1172 {
1173 av_log(avctx, AV_LOG_ERROR,
1174 "1 for reserved RES_X8 is forbidden\n");
1175 //return -1;
1176 }
1177 v->multires = get_bits(gb, 1);
1178 v->res_fasttx = get_bits(gb, 1);
1179 if (!v->res_fasttx)
1180 {
1181 av_log(avctx, AV_LOG_ERROR,
1182 "0 for reserved RES_FASTTX is forbidden\n");
1183 //return -1;
1184 }
1185
1186 v->fastuvmc = get_bits(gb, 1); //common
1187 if (!v->profile && !v->fastuvmc)
1188 {
1189 av_log(avctx, AV_LOG_ERROR,
1190 "FASTUVMC unavailable in Simple Profile\n");
1191 return -1;
1192 }
1193 v->extended_mv = get_bits(gb, 1); //common
1194 if (!v->profile && v->extended_mv)
1195 {
1196 av_log(avctx, AV_LOG_ERROR,
1197 "Extended MVs unavailable in Simple Profile\n");
1198 return -1;
1199 }
1200 v->dquant = get_bits(gb, 2); //common
1201 v->vstransform = get_bits(gb, 1); //common
1202
1203 v->res_transtab = get_bits(gb, 1);
1204 if (v->res_transtab)
1205 {
1206 av_log(avctx, AV_LOG_ERROR,
1207 "1 for reserved RES_TRANSTAB is forbidden\n");
1208 return -1;
1209 }
1210
1211 v->overlap = get_bits(gb, 1); //common
1212
1213 v->s.resync_marker = get_bits(gb, 1);
1214 v->rangered = get_bits(gb, 1);
1215 if (v->rangered && v->profile == PROFILE_SIMPLE)
1216 {
1217 av_log(avctx, AV_LOG_INFO,
1218 "RANGERED should be set to 0 in simple profile\n");
1219 }
1220
1221 v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
1222 v->quantizer_mode = get_bits(gb, 2); //common
1223
1224 v->finterpflag = get_bits(gb, 1); //common
1225 v->res_rtm_flag = get_bits(gb, 1); //reserved
1226 if (!v->res_rtm_flag)
1227 {
1228// av_log(avctx, AV_LOG_ERROR,
1229// "0 for reserved RES_RTM_FLAG is forbidden\n");
1230 av_log(avctx, AV_LOG_ERROR,
1231 "Old WMV3 version detected, only I-frames will be decoded\n");
1232 //return -1;
1233 }
1234 av_log(avctx, AV_LOG_DEBUG,
1235 "Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
1236 "LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
1237 "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
1238 "DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
1239 v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
1240 v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
1241 v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
1242 v->dquant, v->quantizer_mode, avctx->max_b_frames
1243 );
1244 return 0;
1245}
1246
/**
 * Decode Advanced Profile sequence header.
 * Parses level, chroma format, coded dimensions, interlace/broadcast
 * flags, optional display-extension info and optional HRD parameters.
 * Only 4:2:0 chroma and non-PsF streams are accepted.
 * @return 0 on success, -1 on unsupported stream properties
 */
static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
{
    v->res_rtm_flag = 1;  /* Advanced Profile never has the old-WMV3 quirk */
    v->level = get_bits(gb, 3);
    if(v->level >= 5)
    {
        av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
    }
    v->chromaformat = get_bits(gb, 2);
    if (v->chromaformat != 1)
    {
        av_log(v->s.avctx, AV_LOG_ERROR,
               "Only 4:2:0 chroma format supported\n");
        return -1;
    }

    // (fps-2)/4 (->30)
    v->frmrtq_postproc = get_bits(gb, 3); //common
    // (bitrate-32kbps)/64kbps
    v->bitrtq_postproc = get_bits(gb, 5); //common
    v->postprocflag = get_bits(gb, 1); //common

    /* dimensions are stored as (size/2 - 1) in 12 bits */
    v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
    v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
    v->broadcast = get_bits1(gb);
    v->interlace = get_bits1(gb);
    v->tfcntrflag = get_bits1(gb);
    v->finterpflag = get_bits1(gb);
    get_bits1(gb); // reserved
    v->psf = get_bits1(gb);
    if(v->psf) { //PsF, 6.1.13
        av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
        return -1;
    }
    if(get_bits1(gb)) { //Display Info - decoding is not affected by it
        int w, h, ar = 0;
        av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
        w = get_bits(gb, 14);
        h = get_bits(gb, 14);
        av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
        //TODO: store aspect ratio in AVCodecContext
        if(get_bits1(gb))
            ar = get_bits(gb, 4);
        if(ar == 15) { /* ar==15 signals an explicit w:h aspect ratio */
            w = get_bits(gb, 8);
            h = get_bits(gb, 8);
        }

        if(get_bits1(gb)){ //framerate stuff
            if(get_bits1(gb)) {
                get_bits(gb, 16);  /* explicit frame rate */
            } else {
                get_bits(gb, 8);   /* frame rate numerator index */
                get_bits(gb, 4);   /* frame rate denominator index */
            }
        }

        if(get_bits1(gb)){ /* color format description, stored but unused here */
            v->color_prim = get_bits(gb, 8);
            v->transfer_char = get_bits(gb, 8);
            v->matrix_coef = get_bits(gb, 8);
        }
    }

    v->hrd_param_flag = get_bits1(gb);
    if(v->hrd_param_flag) {
        int i;
        v->hrd_num_leaky_buckets = get_bits(gb, 5);
        get_bits(gb, 4); //bitrate exponent
        get_bits(gb, 4); //buffer size exponent
        for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
            get_bits(gb, 16); //hrd_rate[n]
            get_bits(gb, 16); //hrd_buffer[n]
        }
    }
    return 0;
}
1324
/**
 * Decode Advanced Profile entry-point header.
 * Refreshes the coding tools (loop filter, FASTUVMC, extended MV,
 * DQUANT, transform/overlap/quantizer modes) that may change at each
 * entry point, plus optional new coded dimensions and range mapping.
 * Luma/chroma range mapping is parsed but not applied (warned about).
 * @return always 0 (all fields are tolerated)
 */
static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
{
    VC1Context *v = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
    get_bits1(gb); // broken link
    avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signalize possible B-frames
    v->panscanflag = get_bits1(gb);
    get_bits1(gb); // refdist flag
    v->s.loop_filter = get_bits1(gb);
    v->fastuvmc = get_bits1(gb);
    v->extended_mv = get_bits1(gb);
    v->dquant = get_bits(gb, 2);
    v->vstransform = get_bits1(gb);
    v->overlap = get_bits1(gb);
    v->quantizer_mode = get_bits(gb, 2);

    /* hrd_num_leaky_buckets comes from the sequence header */
    if(v->hrd_param_flag){
        for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
            get_bits(gb, 8); //hrd_full[n]
        }
    }

    if(get_bits1(gb)){ /* optional new coded size, stored as (size/2 - 1) */
        avctx->coded_width = (get_bits(gb, 12)+1)<<1;
        avctx->coded_height = (get_bits(gb, 12)+1)<<1;
    }
    if(v->extended_mv)
        v->extended_dmv = get_bits1(gb);
    if(get_bits1(gb)) {
        av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
        skip_bits(gb, 3); // Y range, ignored for now
    }
    if(get_bits1(gb)) {
        av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
        skip_bits(gb, 3); // UV range, ignored for now
    }

    return 0;
}
1366
/**
 * Decode Simple/Main Profile frame (picture) header.
 * Determines picture type (I/P/B/BI), quantizer parameters, MV mode
 * (including intensity-compensation LUT setup), bitplanes and VLC
 * table selectors.  BI pictures are re-typed to B with v->bi_type set
 * so the rest of the decoder treats them as intra-coded B-frames.
 * @return 0 on success, -1 on bitplane decoding failure
 */
static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
    int pqindex, lowquant, status;

    if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
    skip_bits(gb, 2); //framecnt unused
    v->rangeredfrm = 0;
    if (v->rangered) v->rangeredfrm = get_bits(gb, 1);
    v->s.pict_type = get_bits(gb, 1);
    /* with B-frames present, '1' = P and '0' needs a second bit
     * to distinguish I from B */
    if (v->s.avctx->max_b_frames) {
        if (!v->s.pict_type) {
            if (get_bits(gb, 1)) v->s.pict_type = I_TYPE;
            else v->s.pict_type = B_TYPE;
        } else v->s.pict_type = P_TYPE;
    } else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;

    v->bi_type = 0;
    if(v->s.pict_type == B_TYPE) {
        v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
        v->bfraction = vc1_bfraction_lut[v->bfraction];
        /* BFRACTION == 0 signals a BI (intra-coded B) picture */
        if(v->bfraction == 0) {
            v->s.pict_type = BI_TYPE;
        }
    }
    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
        get_bits(gb, 7); // skip buffer fullness

    /* calculate RND: reset to 1 on intra pictures, toggled each P picture */
    if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
        v->rnd = 1;
    if(v->s.pict_type == P_TYPE)
        v->rnd ^= 1;

    /* Quantizer stuff */
    pqindex = get_bits(gb, 5);
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pq = pquant_table[0][pqindex];
    else
        v->pq = pquant_table[1][pqindex];

    /* pquantizer: 1 = uniform, 0 = non-uniform quantizer */
    v->pquantizer = 1;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pquantizer = pqindex < 9;
    if (v->quantizer_mode == QUANT_NON_UNIFORM)
        v->pquantizer = 0;
    v->pqindex = pqindex;
    if (pqindex < 9) v->halfpq = get_bits(gb, 1);
    else v->halfpq = 0;
    if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
        v->pquantizer = get_bits(gb, 1);
    v->dquantfrm = 0;
    if (v->extended_mv == 1) v->mvrange = get_prefix(gb, 0, 3);
    v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
    v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
    v->range_x = 1 << (v->k_x - 1);
    v->range_y = 1 << (v->k_y - 1);
    if (v->profile == PROFILE_ADVANCED)
    {
        if (v->postprocflag) v->postproc = get_bits(gb, 1);
    }
    else
        if (v->multires && v->s.pict_type != B_TYPE) v->respic = get_bits(gb, 2);

    /* use_ic (intensity compensation) persists only until the next I/P frame */
    if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;

    switch(v->s.pict_type) {
    case P_TYPE:
        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        {
            int scale, shift, i;
            v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
            v->lumscale = get_bits(gb, 6);
            v->lumshift = get_bits(gb, 6);
            v->use_ic = 1;
            /* fill lookup tables for intensity compensation */
            if(!v->lumscale) {
                scale = -64;
                shift = (255 - v->lumshift * 2) << 6;
                if(v->lumshift > 31)
                    shift += 128 << 6;
            } else {
                scale = v->lumscale + 32;
                if(v->lumshift > 31)
                    shift = (v->lumshift - 64) << 6;
                else
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
                v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
                v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
        }
        /* derive quarter_sample/mspel from the (effective) MV mode */
        if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
            v->s.quarter_sample = 0;
        else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
                v->s.quarter_sample = 0;
            else
                v->s.quarter_sample = 1;
        } else
            v->s.quarter_sample = 1;
        v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));

        if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                 v->mv_mode2 == MV_PMODE_MIXED_MV)
                || v->mv_mode == MV_PMODE_MIXED_MV)
        {
            status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
            if (status < 0) return -1;
            av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                   "Imode: %i, Invert: %i\n", status>>1, status&1);
        } else {
            v->mv_type_is_raw = 0;
            memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
        }
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        /* Hopefully this is correct for P frames */
        v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];

        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0; //FIXME Is that so ?
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    case B_TYPE:
        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        /* B-frames only use 1MV or 1MV half-pel bilinear modes */
        v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
        v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
        v->s.mspel = v->s.quarter_sample;

        status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        v->s.mv_table_index = get_bits(gb, 2);
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];

        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0;
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    }

    /* AC Syntax */
    v->c_ac_table_index = decode012(gb);
    if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
    {
        v->y_ac_table_index = decode012(gb);
    }
    /* DC Syntax */
    v->s.dc_table_index = get_bits(gb, 1);

    /* decode BI as B with bi_type flag set */
    if(v->s.pict_type == BI_TYPE) {
        v->s.pict_type = B_TYPE;
        v->bi_type = 1;
    }
    return 0;
}
1577
/**
 * Decode Advanced Profile frame (picture) header.
 * Picture type is coded as a unary prefix; B-frames currently bail out
 * with -1, and skipped P-frames return early with p_frame_skipped set.
 * I/BI pictures additionally decode the ACPRED and optional CONDOVER
 * bitplanes; P pictures mirror the Simple/Main MV-mode handling.
 * @return 0 on success, -1 on unsupported picture type or bitplane failure
 */
static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
{
    int fcm;
    int pqindex, lowquant;
    int status;

    v->p_frame_skipped = 0;

    /* NOTE(review): fcm is read but never used below (frame coding mode
     * for interlaced streams is not yet handled) */
    if(v->interlace)
        fcm = decode012(gb);
    switch(get_prefix(gb, 0, 4)) {
    case 0:
        v->s.pict_type = P_TYPE;
        break;
    case 1:
        v->s.pict_type = B_TYPE;
        return -1;  /* B-frames in Advanced Profile not supported here */
//      break;
    case 2:
        v->s.pict_type = I_TYPE;
        break;
    case 3:
        v->s.pict_type = BI_TYPE;
        break;
    case 4:
        v->s.pict_type = P_TYPE; // skipped pic
        v->p_frame_skipped = 1;
        return 0;
    }
    if(v->tfcntrflag)
        get_bits(gb, 8);  /* TFCNTR, ignored */
    if(v->broadcast) {
        if(!v->interlace || v->panscanflag) {
            get_bits(gb, 2);  /* RPTFRM */
        } else {
            get_bits1(gb);    /* TFF */
            get_bits1(gb);    /* RFF */
        }
    }
    if(v->panscanflag) {
        //... pan/scan windows not parsed yet
    }
    v->rnd = get_bits1(gb);
    if(v->interlace)
        v->uvsamp = get_bits1(gb);
    if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
    pqindex = get_bits(gb, 5);
    v->pqindex = pqindex;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pq = pquant_table[0][pqindex];
    else
        v->pq = pquant_table[1][pqindex];

    /* pquantizer: 1 = uniform, 0 = non-uniform quantizer */
    v->pquantizer = 1;
    if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
        v->pquantizer = pqindex < 9;
    if (v->quantizer_mode == QUANT_NON_UNIFORM)
        v->pquantizer = 0;
    v->pqindex = pqindex;
    if (pqindex < 9) v->halfpq = get_bits(gb, 1);
    else v->halfpq = 0;
    if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
        v->pquantizer = get_bits(gb, 1);

    switch(v->s.pict_type) {
    case I_TYPE:
    case BI_TYPE:
        status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);
        v->condover = CONDOVER_NONE;
        /* conditional overlap smoothing only at low quantizers */
        if(v->overlap && v->pq <= 8) {
            v->condover = decode012(gb);
            if(v->condover == CONDOVER_SELECT) {
                status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
                if (status < 0) return -1;
                av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
                       "Imode: %i, Invert: %i\n", status>>1, status&1);
            }
        }
        break;
    case P_TYPE:
        if(v->postprocflag)
            v->postproc = get_bits1(gb);
        if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
        else v->mvrange = 0;
        v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
        v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
        v->range_x = 1 << (v->k_x - 1);
        v->range_y = 1 << (v->k_y - 1);

        if (v->pq < 5) v->tt_index = 0;
        else if(v->pq < 13) v->tt_index = 1;
        else v->tt_index = 2;

        lowquant = (v->pq > 12) ? 0 : 1;
        v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
        if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
        {
            int scale, shift, i;
            v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
            v->lumscale = get_bits(gb, 6);
            v->lumshift = get_bits(gb, 6);
            /* fill lookup tables for intensity compensation */
            if(!v->lumscale) {
                scale = -64;
                shift = (255 - v->lumshift * 2) << 6;
                if(v->lumshift > 31)
                    shift += 128 << 6;
            } else {
                scale = v->lumscale + 32;
                if(v->lumshift > 31)
                    shift = (v->lumshift - 64) << 6;
                else
                    shift = v->lumshift << 6;
            }
            for(i = 0; i < 256; i++) {
                v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
                v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
            }
        }
        /* derive quarter_sample/mspel from the (effective) MV mode */
        if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
            v->s.quarter_sample = 0;
        else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
            if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
                v->s.quarter_sample = 0;
            else
                v->s.quarter_sample = 1;
        } else
            v->s.quarter_sample = 1;
        v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));

        if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                 v->mv_mode2 == MV_PMODE_MIXED_MV)
                || v->mv_mode == MV_PMODE_MIXED_MV)
        {
            status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
            if (status < 0) return -1;
            av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                   "Imode: %i, Invert: %i\n", status>>1, status&1);
        } else {
            v->mv_type_is_raw = 0;
            memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
        }
        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
        if (status < 0) return -1;
        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
               "Imode: %i, Invert: %i\n", status>>1, status&1);

        /* Hopefully this is correct for P frames */
        v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
        v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
        if (v->dquant)
        {
            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
            vop_dquant_decoding(v);
        }

        v->ttfrm = 0; //FIXME Is that so ?
        if (v->vstransform)
        {
            v->ttmbf = get_bits(gb, 1);
            if (v->ttmbf)
            {
                v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
            }
        } else {
            v->ttmbf = 1;
            v->ttfrm = TT_8X8;
        }
        break;
    }

    /* AC Syntax */
    v->c_ac_table_index = decode012(gb);
    if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
    {
        v->y_ac_table_index = decode012(gb);
    }
    /* DC Syntax */
    v->s.dc_table_index = get_bits(gb, 1);
    if (v->s.pict_type == I_TYPE && v->dquant) {
        av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
        vop_dquant_decoding(v);
    }

    /* decode BI as B with bi_type flag set */
    v->bi_type = 0;
    if(v->s.pict_type == BI_TYPE) {
        v->s.pict_type = B_TYPE;
        v->bi_type = 1;
    }
    return 0;
}
1772
1773/***********************************************************************/
1774/**
1775 * @defgroup block VC-1 Block-level functions
1776 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1777 * @{
1778 */
1779
1780/**
1781 * @def GET_MQUANT
1782 * @brief Get macroblock-level quantizer scale
1783 */
1784#define GET_MQUANT() \
1785 if (v->dquantfrm) \
1786 { \
1787 int edges = 0; \
1788 if (v->dqprofile == DQPROFILE_ALL_MBS) \
1789 { \
1790 if (v->dqbilevel) \
1791 { \
1792 mquant = (get_bits(gb, 1)) ? v->altpq : v->pq; \
1793 } \
1794 else \
1795 { \
1796 mqdiff = get_bits(gb, 3); \
1797 if (mqdiff != 7) mquant = v->pq + mqdiff; \
1798 else mquant = get_bits(gb, 5); \
1799 } \
1800 } \
1801 if(v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1802 edges = 1 << v->dqsbedge; \
1803 else if(v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1804 edges = (3 << v->dqsbedge) % 15; \
1805 else if(v->dqprofile == DQPROFILE_FOUR_EDGES) \
1806 edges = 15; \
1807 if((edges&1) && !s->mb_x) \
1808 mquant = v->altpq; \
1809 if((edges&2) && s->first_slice_line) \
1810 mquant = v->altpq; \
1811 if((edges&4) && s->mb_x == (s->mb_width - 1)) \
1812 mquant = v->altpq; \
1813 if((edges&8) && s->mb_y == (s->mb_height - 1)) \
1814 mquant = v->altpq; \
1815 }
1816
1817/**
1818 * @def GET_MVDATA(_dmv_x, _dmv_y)
1819 * @brief Get MV differentials
1820 * @see MVDATA decoding from 8.3.5.2, p(1)20
1821 * @param _dmv_x Horizontal differential for decoded MV
1822 * @param _dmv_y Vertical differential for decoded MV
1823 */
1824#define GET_MVDATA(_dmv_x, _dmv_y) \
1825 index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
1826 VC1_MV_DIFF_VLC_BITS, 2); \
1827 if (index > 36) \
1828 { \
1829 mb_has_coeffs = 1; \
1830 index -= 37; \
1831 } \
1832 else mb_has_coeffs = 0; \
1833 s->mb_intra = 0; \
1834 if (!index) { _dmv_x = _dmv_y = 0; } \
1835 else if (index == 35) \
1836 { \
1837 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1838 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1839 } \
1840 else if (index == 36) \
1841 { \
1842 _dmv_x = 0; \
1843 _dmv_y = 0; \
1844 s->mb_intra = 1; \
1845 } \
1846 else \
1847 { \
1848 index1 = index%6; \
1849 if (!s->quarter_sample && index1 == 5) val = 1; \
1850 else val = 0; \
1851 if(size_table[index1] - val > 0) \
1852 val = get_bits(gb, size_table[index1] - val); \
1853 else val = 0; \
1854 sign = 0 - (val&1); \
1855 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1856 \
1857 index1 = index/6; \
1858 if (!s->quarter_sample && index1 == 5) val = 1; \
1859 else val = 0; \
1860 if(size_table[index1] - val > 0) \
1861 val = get_bits(gb, size_table[index1] - val); \
1862 else val = 0; \
1863 sign = 0 - (val&1); \
1864 _dmv_y = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1865 }
1866
/** Predict and set motion vector
 *
 * Computes the MV predictor from neighbours A (above), B (above-corner)
 * and C (left), applies the pullback of 8.3.5.3.4 and the hybrid
 * prediction of 8.3.5.3.5, then stores the final MV (predictor +
 * differential, wrapped into the signed MV range of 4.11) in both
 * s->mv and current_picture.motion_val.
 * @param n   luma block index within the MB (0..3)
 * @param mv1 non-zero for 1-MV mode (the MV is duplicated to all 4 blocks)
 */
static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t* is_intra)
{
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;

    /* scale MV difference to be quad-pel */
    dmv_x <<= 1 - s->quarter_sample;
    dmv_y <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    /* intra MB: store zero MVs and bail out */
    if(s->mb_intra){
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        if(mv1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
        }
        return;
    }

    C = s->current_picture.motion_val[0][xy - 1];
    A = s->current_picture.motion_val[0][xy - wrap];
    if(mv1)
        off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
    else {
        //in 4-MV mode different blocks have different B predictor position
        switch(n){
        case 0:
            off = (s->mb_x > 0) ? -1 : 1;
            break;
        case 1:
            off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
            break;
        case 2:
            off = 1;
            break;
        case 3:
            off = -1;
        }
    }
    B = s->current_picture.motion_val[0][xy - wrap + off];

    if(!s->first_slice_line || (n==2 || n==3)) { // predictor A is not out of bounds
        if(s->mb_width == 1) {
            px = A[0];
            py = A[1];
        } else {
            px = mid_pred(A[0], B[0], C[0]);
            py = mid_pred(A[1], B[1], C[1]);
        }
    } else if(s->mb_x || (n==1 || n==3)) { // predictor C is not out of bounds
        px = C[0];
        py = C[1];
    } else {
        px = py = 0;
    }
    /* Pullback MV as specified in 8.3.5.3.4 */
    {
        int qx, qy, X, Y;
        qx = (s->mb_x << 6) + ((n==1 || n==3) ? 32 : 0);
        qy = (s->mb_y << 6) + ((n==2 || n==3) ? 32 : 0);
        X = (s->mb_width << 6) - 4;
        Y = (s->mb_height << 6) - 4;
        if(mv1) {
            if(qx + px < -60) px = -60 - qx;
            if(qy + py < -60) py = -60 - qy;
        } else {
            if(qx + px < -28) px = -28 - qx;
            if(qy + py < -28) py = -28 - qy;
        }
        if(qx + px > X) px = X - qx;
        if(qy + py > Y) py = Y - qy;
    }
    /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
    if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
        /* when the candidate diverges too far from A (or A is intra),
         * an explicit bit selects between A and C */
        if(is_intra[xy - wrap])
            sum = FFABS(px) + FFABS(py);
        else
            sum = FFABS(px - A[0]) + FFABS(py - A[1]);
        if(sum > 32) {
            if(get_bits1(&s->gb)) {
                px = A[0];
                py = A[1];
            } else {
                px = C[0];
                py = C[1];
            }
        } else {
            if(is_intra[xy - 1])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - C[0]) + FFABS(py - C[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            }
        }
    }
    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if(mv1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[0][xy + 1][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + 1][1] = s->current_picture.motion_val[0][xy][1];
        s->current_picture.motion_val[0][xy + wrap][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + wrap][1] = s->current_picture.motion_val[0][xy][1];
        s->current_picture.motion_val[0][xy + wrap + 1][0] = s->current_picture.motion_val[0][xy][0];
        s->current_picture.motion_val[0][xy + wrap + 1][1] = s->current_picture.motion_val[0][xy][1];
    }
}
1992
/** Motion compensation for direct or interpolated blocks in B-frames
 *
 * Averages prediction from the NEXT reference picture into s->dest
 * (avg_* DSP functions), using half-pel luma and quarter-pel bilinear
 * chroma, with edge emulation and range-reduction scaling as needed.
 */
static void vc1_interp_mc(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    DSPContext *dsp = &v->s.dsp;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;

    if(!v->s.next_picture.data[0])return;

    /* backward MV; chroma MV is the halved luma MV, 3/4-pel rounded up */
    mx = s->mv[1][0][0];
    my = s->mv[1][0][1];
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    srcY = s->next_picture.data[0];
    srcU = s->next_picture.data[1];
    srcV = s->next_picture.data[2];

    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    src_x   = clip(  src_x, -16, s->mb_width  * 16);
    src_y   = clip(  src_y, -16, s->mb_height * 16);
    uvsrc_x = clip(uvsrc_x,  -8, s->mb_width  *  8);
    uvsrc_y = clip(uvsrc_y,  -8, s->mb_height *  8);

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    /* for grayscale we should not try to read from unknown area */
    if(s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* edge emulation / range-reduction path: copy the source into
     * edge_emu_buffer so it can be padded and/or rescaled safely */
    if(v->rangeredfrm
       || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
       || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
        uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
                            src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
        srcY = s->edge_emu_buffer;
        ff_emulated_edge_mc(uvbuf     , srcU, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
                            uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if(v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for(j = 0; j < 17 + s->mspel*2; j++) {
                for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU; src2 = srcV;
            for(j = 0; j < 9; j++) {
                for(i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    /* FASTUVMC: round chroma MVs toward zero to half-pel positions */
    if(v->fastuvmc) {
        uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
        uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
    }

    /* luma: reduce to half-pel and average into the destination */
    mx >>= 1;
    my >>= 1;
    dxy = ((my & 1) << 1) | (mx & 1);

    dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);

    if(s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel blilinear */
    uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
    uvmx = (uvmx&3)<<1;
    uvmy = (uvmy&3)<<1;
    dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
    dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}
2089
2090static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2091{
2092 int n = bfrac;
2093
2094#if B_FRACTION_DEN==256
2095 if(inv)
2096 n -= 256;
2097 if(!qs)
2098 return 2 * ((value * n + 255) >> 9);
2099 return (value * n + 128) >> 8;
2100#else
2101 if(inv)
2102 n -= B_FRACTION_DEN;
2103 if(!qs)
2104 return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2105 return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2106#endif
2107}
2108
2109/** Reconstruct motion vector for B-frame and do motion compensation
2110 */
2111static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
2112{
2113 if(v->use_ic) {
2114 v->mv_mode2 = v->mv_mode;
2115 v->mv_mode = MV_PMODE_INTENSITY_COMP;
2116 }
2117 if(direct) {
2118 vc1_mc_1mv(v, 0);
2119 vc1_interp_mc(v);
2120 if(v->use_ic) v->mv_mode = v->mv_mode2;
2121 return;
2122 }
2123 if(mode == BMV_TYPE_INTERPOLATED) {
2124 vc1_mc_1mv(v, 0);
2125 vc1_interp_mc(v);
2126 if(v->use_ic) v->mv_mode = v->mv_mode2;
2127 return;
2128 }
2129
2130 if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
2131 vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2132 if(v->use_ic) v->mv_mode = v->mv_mode2;
2133}
2134
/** Predict and reconstruct B-frame motion vectors and store them in the
 * current picture.
 * @param v VC1 context
 * @param dmv_x forward/backward MV differentials (x), scaled in place to quarter-pel
 * @param dmv_y forward/backward MV differentials (y), scaled in place to quarter-pel
 * @param direct nonzero for direct-mode MBs (MVs derived from the co-located MB)
 * @param mvtype BMV_TYPE_FORWARD / BMV_TYPE_BACKWARD / BMV_TYPE_INTERPOLATED
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    /* intra MB: no motion, zero both stored MVs */
    if(s->mb_intra) {
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    /* direct-mode candidates: co-located MV of the next picture scaled by the
     * B fraction (forward, inv=0) and by its complement (backward, inv=1) */
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
    if(direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* forward MV: median of left (C), top (A) and top-right (B) neighbours;
         * B wraps to top-left on the last MB column */
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap*2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap*2 + off];

        if(!s->first_slice_line) { // predictor A is not out of bounds
            if(s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if(s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if(v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if(qx + px < -28) px = -28 - qx;
                if(qy + py < -28) py = -28 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if(qx + px < -60) px = -60 - qx;
                if(qy + py < -60) py = -60 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via "0 &&"; enabling it would also
         * consume bits from the bitstream (get_bits1 below) */
        if(0 && !s->first_slice_line && s->mb_x) {
            if(is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if(is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if(sum > 32) {
                    if(get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* backward MV: same scheme as above on the backward MV plane [1] */
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap*2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap*2 + off];

        if(!s->first_slice_line) { // predictor A is not out of bounds
            if(s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if(s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if(v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if(qx + px < -28) px = -28 - qx;
                if(qy + py < -28) py = -28 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if(qx + px < -60) px = -60 - qx;
                if(qy + py < -60) py = -60 - qy;
                if(qx + px > X) px = X - qx;
                if(qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via "0 &&", see forward branch above */
        if(0 && !s->first_slice_line && s->mb_x) {
            if(is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if(sum > 32) {
                if(get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if(is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if(sum > 32) {
                    if(get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
2335
2336/** Get predicted DC value for I-frames only
2337 * prediction dir: left=0, top=1
2338 * @param s MpegEncContext
2339 * @param[in] n block index in the current MB
2340 * @param dc_val_ptr Pointer to DC predictor
2341 * @param dir_ptr Prediction direction for use in AC prediction
2342 */
2343static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2344 int16_t **dc_val_ptr, int *dir_ptr)
2345{
2346 int a, b, c, wrap, pred, scale;
2347 int16_t *dc_val;
2348 static const uint16_t dcpred[32] = {
2349 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2350 114, 102, 93, 85, 79, 73, 68, 64,
2351 60, 57, 54, 51, 49, 47, 45, 43,
2352 41, 39, 38, 37, 35, 34, 33
2353 };
2354
2355 /* find prediction - wmv3_dc_scale always used here in fact */
2356 if (n < 4) scale = s->y_dc_scale;
2357 else scale = s->c_dc_scale;
2358
2359 wrap = s->block_wrap[n];
2360 dc_val= s->dc_val[0] + s->block_index[n];
2361
2362 /* B A
2363 * C X
2364 */
2365 c = dc_val[ - 1];
2366 b = dc_val[ - 1 - wrap];
2367 a = dc_val[ - wrap];
2368
2369 if (pq < 9 || !overlap)
2370 {
2371 /* Set outer values */
2372 if (s->first_slice_line && (n!=2 && n!=3)) b=a=dcpred[scale];
2373 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=dcpred[scale];
2374 }
2375 else
2376 {
2377 /* Set outer values */
2378 if (s->first_slice_line && (n!=2 && n!=3)) b=a=0;
2379 if (s->mb_x == 0 && (n!=1 && n!=3)) b=c=0;
2380 }
2381
2382 if (abs(a - b) <= abs(b - c)) {
2383 pred = c;
2384 *dir_ptr = 1;//left
2385 } else {
2386 pred = a;
2387 *dir_ptr = 0;//top
2388 }
2389
2390 /* update predictor */
2391 *dc_val_ptr = &dc_val[0];
2392 return pred;
2393}
2394
2395
2396/** Get predicted DC value
2397 * prediction dir: left=0, top=1
2398 * @param s MpegEncContext
2399 * @param[in] n block index in the current MB
2400 * @param dc_val_ptr Pointer to DC predictor
2401 * @param dir_ptr Prediction direction for use in AC prediction
2402 */
2403static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2404 int a_avail, int c_avail,
2405 int16_t **dc_val_ptr, int *dir_ptr)
2406{
2407 int a, b, c, wrap, pred, scale;
2408 int16_t *dc_val;
2409 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2410 int q1, q2 = 0;
2411
2412 /* find prediction - wmv3_dc_scale always used here in fact */
2413 if (n < 4) scale = s->y_dc_scale;
2414 else scale = s->c_dc_scale;
2415
2416 wrap = s->block_wrap[n];
2417 dc_val= s->dc_val[0] + s->block_index[n];
2418
2419 /* B A
2420 * C X
2421 */
2422 c = dc_val[ - 1];
2423 b = dc_val[ - 1 - wrap];
2424 a = dc_val[ - wrap];
2425 /* scale predictors if needed */
2426 q1 = s->current_picture.qscale_table[mb_pos];
2427 if(c_avail && (n!= 1 && n!=3)) {
2428 q2 = s->current_picture.qscale_table[mb_pos - 1];
2429 if(q2 && q2 != q1)
2430 c = (c * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2431 }
2432 if(a_avail && (n!= 2 && n!=3)) {
2433 q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2434 if(q2 && q2 != q1)
2435 a = (a * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2436 }
2437 if(a_avail && c_avail && (n!=3)) {
2438 int off = mb_pos;
2439 if(n != 1) off--;
2440 if(n != 2) off -= s->mb_stride;
2441 q2 = s->current_picture.qscale_table[off];
2442 if(q2 && q2 != q1)
2443 b = (b * s->y_dc_scale_table[q2] * vc1_dqscale[s->y_dc_scale_table[q1] - 1] + 0x20000) >> 18;
2444 }
2445
2446 if(a_avail && c_avail) {
2447 if(abs(a - b) <= abs(b - c)) {
2448 pred = c;
2449 *dir_ptr = 1;//left
2450 } else {
2451 pred = a;
2452 *dir_ptr = 0;//top
2453 }
2454 } else if(a_avail) {
2455 pred = a;
2456 *dir_ptr = 0;//top
2457 } else if(c_avail) {
2458 pred = c;
2459 *dir_ptr = 1;//left
2460 } else {
2461 pred = 0;
2462 *dir_ptr = 1;//left
2463 }
2464
2465 /* update predictor */
2466 *dc_val_ptr = &dc_val[0];
2467 return pred;
2468}
2469
2470
2471/**
2472 * @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
2473 * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2474 * @{
2475 */
2476
2477static inline int vc1_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
2478{
2479 int xy, wrap, pred, a, b, c;
2480
2481 xy = s->block_index[n];
2482 wrap = s->b8_stride;
2483
2484 /* B C
2485 * A X
2486 */
2487 a = s->coded_block[xy - 1 ];
2488 b = s->coded_block[xy - 1 - wrap];
2489 c = s->coded_block[xy - wrap];
2490
2491 if (b == c) {
2492 pred = a;
2493 } else {
2494 pred = c;
2495 }
2496
2497 /* store value */
2498 *coded_block_ptr = &s->coded_block[xy];
2499
2500 return pred;
2501}
2502
2503/**
2504 * Decode one AC coefficient
2505 * @param v The VC1 context
2506 * @param last Last coefficient
2507 * @param skip How much zero coefficients to skip
2508 * @param value Decoded AC coefficient value
2509 * @see 8.1.3.4
2510 */
2511static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
2512{
2513 GetBitContext *gb = &v->s.gb;
2514 int index, escape, run = 0, level = 0, lst = 0;
2515
2516 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2517 if (index != vc1_ac_sizes[codingset] - 1) {
2518 run = vc1_index_decode_table[codingset][index][0];
2519 level = vc1_index_decode_table[codingset][index][1];
2520 lst = index >= vc1_last_decode_table[codingset];
2521 if(get_bits(gb, 1))
2522 level = -level;
2523 } else {
2524 escape = decode210(gb);
2525 if (escape != 2) {
2526 index = get_vlc2(gb, vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2527 run = vc1_index_decode_table[codingset][index][0];
2528 level = vc1_index_decode_table[codingset][index][1];
2529 lst = index >= vc1_last_decode_table[codingset];
2530 if(escape == 0) {
2531 if(lst)
2532 level += vc1_last_delta_level_table[codingset][run];
2533 else
2534 level += vc1_delta_level_table[codingset][run];
2535 } else {
2536 if(lst)
2537 run += vc1_last_delta_run_table[codingset][level] + 1;
2538 else
2539 run += vc1_delta_run_table[codingset][level] + 1;
2540 }
2541 if(get_bits(gb, 1))
2542 level = -level;
2543 } else {
2544 int sign;
2545 lst = get_bits(gb, 1);
2546 if(v->s.esc3_level_length == 0) {
2547 if(v->pq < 8 || v->dquantfrm) { // table 59
2548 v->s.esc3_level_length = get_bits(gb, 3);
2549 if(!v->s.esc3_level_length)
2550 v->s.esc3_level_length = get_bits(gb, 2) + 8;
2551 } else { //table 60
2552 v->s.esc3_level_length = get_prefix(gb, 1, 6) + 2;
2553 }
2554 v->s.esc3_run_length = 3 + get_bits(gb, 2);
2555 }
2556 run = get_bits(gb, v->s.esc3_run_length);
2557 sign = get_bits(gb, 1);
2558 level = get_bits(gb, v->s.esc3_level_length);
2559 if(sign)
2560 level = -level;
2561 }
2562 }
2563
2564 *last = lst;
2565 *skip = run;
2566 *value = level;
2567}
2568
2569/** Decode intra block in intra frames - should be faster than decode_intra_block
2570 * @param v VC1Context
2571 * @param block block to decode
2572 * @param coded are AC coeffs present or not
2573 * @param codingset set of VLC to decode data
2574 */
2575static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset)
2576{
2577 GetBitContext *gb = &v->s.gb;
2578 MpegEncContext *s = &v->s;
2579 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2580 int run_diff, i;
2581 int16_t *dc_val;
2582 int16_t *ac_val, *ac_val2;
2583 int dcdiff;
2584
2585 /* Get DC differential */
2586 if (n < 4) {
2587 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2588 } else {
2589 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2590 }
2591 if (dcdiff < 0){
2592 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2593 return -1;
2594 }
2595 if (dcdiff)
2596 {
2597 if (dcdiff == 119 /* ESC index value */)
2598 {
2599 /* TODO: Optimize */
2600 if (v->pq == 1) dcdiff = get_bits(gb, 10);
2601 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2602 else dcdiff = get_bits(gb, 8);
2603 }
2604 else
2605 {
2606 if (v->pq == 1)
2607 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2608 else if (v->pq == 2)
2609 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2610 }
2611 if (get_bits(gb, 1))
2612 dcdiff = -dcdiff;
2613 }
2614
2615 /* Prediction */
2616 dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2617 *dc_val = dcdiff;
2618
2619 /* Store the quantized DC coeff, used for prediction */
2620 if (n < 4) {
2621 block[0] = dcdiff * s->y_dc_scale;
2622 } else {
2623 block[0] = dcdiff * s->c_dc_scale;
2624 }
2625 /* Skip ? */
2626 run_diff = 0;
2627 i = 0;
2628 if (!coded) {
2629 goto not_coded;
2630 }
2631
2632 //AC Decoding
2633 i = 1;
2634
2635 {
2636 int last = 0, skip, value;
2637 const int8_t *zz_table;
2638 int scale;
2639 int k;
2640
2641 scale = v->pq * 2 + v->halfpq;
2642
2643 if(v->s.ac_pred) {
2644 if(!dc_pred_dir)
2645 zz_table = vc1_horizontal_zz;
2646 else
2647 zz_table = vc1_vertical_zz;
2648 } else
2649 zz_table = vc1_normal_zz;
2650
2651 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2652 ac_val2 = ac_val;
2653 if(dc_pred_dir) //left
2654 ac_val -= 16;
2655 else //top
2656 ac_val -= 16 * s->block_wrap[n];
2657
2658 while (!last) {
2659 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2660 i += skip;
2661 if(i > 63)
2662 break;
2663 block[zz_table[i++]] = value;
2664 }
2665
2666 /* apply AC prediction if needed */
2667 if(s->ac_pred) {
2668 if(dc_pred_dir) { //left
2669 for(k = 1; k < 8; k++)
2670 block[k << 3] += ac_val[k];
2671 } else { //top
2672 for(k = 1; k < 8; k++)
2673 block[k] += ac_val[k + 8];
2674 }
2675 }
2676 /* save AC coeffs for further prediction */
2677 for(k = 1; k < 8; k++) {
2678 ac_val2[k] = block[k << 3];
2679 ac_val2[k + 8] = block[k];
2680 }
2681
2682 /* scale AC coeffs */
2683 for(k = 1; k < 64; k++)
2684 if(block[k]) {
2685 block[k] *= scale;
2686 if(!v->pquantizer)
2687 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2688 }
2689
2690 if(s->ac_pred) i = 63;
2691 }
2692
2693not_coded:
2694 if(!coded) {
2695 int k, scale;
2696 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2697 ac_val2 = ac_val;
2698
2699 scale = v->pq * 2 + v->halfpq;
2700 memset(ac_val2, 0, 16 * 2);
2701 if(dc_pred_dir) {//left
2702 ac_val -= 16;
2703 if(s->ac_pred)
2704 memcpy(ac_val2, ac_val, 8 * 2);
2705 } else {//top
2706 ac_val -= 16 * s->block_wrap[n];
2707 if(s->ac_pred)
2708 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2709 }
2710
2711 /* apply AC prediction if needed */
2712 if(s->ac_pred) {
2713 if(dc_pred_dir) { //left
2714 for(k = 1; k < 8; k++) {
2715 block[k << 3] = ac_val[k] * scale;
2716 if(!v->pquantizer && block[k << 3])
2717 block[k << 3] += (block[k << 3] < 0) ? -v->pq : v->pq;
2718 }
2719 } else { //top
2720 for(k = 1; k < 8; k++) {
2721 block[k] = ac_val[k + 8] * scale;
2722 if(!v->pquantizer && block[k])
2723 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2724 }
2725 }
2726 i = 63;
2727 }
2728 }
2729 s->block_last_index[n] = i;
2730
2731 return 0;
2732}
2733
2734/** Decode intra block in intra frames - should be faster than decode_intra_block
2735 * @param v VC1Context
2736 * @param block block to decode
2737 * @param coded are AC coeffs present or not
2738 * @param codingset set of VLC to decode data
2739 */
2740static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
2741{
2742 GetBitContext *gb = &v->s.gb;
2743 MpegEncContext *s = &v->s;
2744 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2745 int run_diff, i;
2746 int16_t *dc_val;
2747 int16_t *ac_val, *ac_val2;
2748 int dcdiff;
2749 int a_avail = v->a_avail, c_avail = v->c_avail;
2750 int use_pred = s->ac_pred;
2751 int scale;
2752 int q1, q2 = 0;
2753 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2754
2755 /* Get DC differential */
2756 if (n < 4) {
2757 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2758 } else {
2759 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2760 }
2761 if (dcdiff < 0){
2762 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2763 return -1;
2764 }
2765 if (dcdiff)
2766 {
2767 if (dcdiff == 119 /* ESC index value */)
2768 {
2769 /* TODO: Optimize */
2770 if (mquant == 1) dcdiff = get_bits(gb, 10);
2771 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2772 else dcdiff = get_bits(gb, 8);
2773 }
2774 else
2775 {
2776 if (mquant == 1)
2777 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2778 else if (mquant == 2)
2779 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2780 }
2781 if (get_bits(gb, 1))
2782 dcdiff = -dcdiff;
2783 }
2784
2785 /* Prediction */
2786 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2787 *dc_val = dcdiff;
2788
2789 /* Store the quantized DC coeff, used for prediction */
2790 if (n < 4) {
2791 block[0] = dcdiff * s->y_dc_scale;
2792 } else {
2793 block[0] = dcdiff * s->c_dc_scale;
2794 }
2795 /* Skip ? */
2796 run_diff = 0;
2797 i = 0;
2798
2799 //AC Decoding
2800 i = 1;
2801
2802 /* check if AC is needed at all and adjust direction if needed */
2803 if(!a_avail) dc_pred_dir = 1;
2804 if(!c_avail) dc_pred_dir = 0;
2805 if(!a_avail && !c_avail) use_pred = 0;
2806 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2807 ac_val2 = ac_val;
2808
2809 scale = mquant * 2 + v->halfpq;
2810
2811 if(dc_pred_dir) //left
2812 ac_val -= 16;
2813 else //top
2814 ac_val -= 16 * s->block_wrap[n];
2815
2816 q1 = s->current_picture.qscale_table[mb_pos];
2817 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
2818 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2819 if(n && n<4) q2 = q1;
2820
2821 if(coded) {
2822 int last = 0, skip, value;
2823 const int8_t *zz_table;
2824 int k;
2825
2826 if(v->s.ac_pred) {
2827 if(!dc_pred_dir)
2828 zz_table = vc1_horizontal_zz;
2829 else
2830 zz_table = vc1_vertical_zz;
2831 } else
2832 zz_table = vc1_normal_zz;
2833
2834 while (!last) {
2835 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2836 i += skip;
2837 if(i > 63)
2838 break;
2839 block[zz_table[i++]] = value;
2840 }
2841
2842 /* apply AC prediction if needed */
2843 if(use_pred) {
2844 /* scale predictors if needed*/
2845 if(q2 && q1!=q2) {
2846 q1 = q1 * 2 - 1;
2847 q2 = q2 * 2 - 1;
2848
2849 if(dc_pred_dir) { //left
2850 for(k = 1; k < 8; k++)
2851 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2852 } else { //top
2853 for(k = 1; k < 8; k++)
2854 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2855 }
2856 } else {
2857 if(dc_pred_dir) { //left
2858 for(k = 1; k < 8; k++)
2859 block[k << 3] += ac_val[k];
2860 } else { //top
2861 for(k = 1; k < 8; k++)
2862 block[k] += ac_val[k + 8];
2863 }
2864 }
2865 }
2866 /* save AC coeffs for further prediction */
2867 for(k = 1; k < 8; k++) {
2868 ac_val2[k] = block[k << 3];
2869 ac_val2[k + 8] = block[k];
2870 }
2871
2872 /* scale AC coeffs */
2873 for(k = 1; k < 64; k++)
2874 if(block[k]) {
2875 block[k] *= scale;
2876 if(!v->pquantizer)
2877 block[k] += (block[k] < 0) ? -mquant : mquant;
2878 }
2879
2880 if(use_pred) i = 63;
2881 } else { // no AC coeffs
2882 int k;
2883
2884 memset(ac_val2, 0, 16 * 2);
2885 if(dc_pred_dir) {//left
2886 if(use_pred) {
2887 memcpy(ac_val2, ac_val, 8 * 2);
2888 if(q2 && q1!=q2) {
2889 q1 = q1 * 2 - 1;
2890 q2 = q2 * 2 - 1;
2891 for(k = 1; k < 8; k++)
2892 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2893 }
2894 }
2895 } else {//top
2896 if(use_pred) {
2897 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2898 if(q2 && q1!=q2) {
2899 q1 = q1 * 2 - 1;
2900 q2 = q2 * 2 - 1;
2901 for(k = 1; k < 8; k++)
2902 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2903 }
2904 }
2905 }
2906
2907 /* apply AC prediction if needed */
2908 if(use_pred) {
2909 if(dc_pred_dir) { //left
2910 for(k = 1; k < 8; k++) {
2911 block[k << 3] = ac_val2[k] * scale;
2912 if(!v->pquantizer && block[k << 3])
2913 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
2914 }
2915 } else { //top
2916 for(k = 1; k < 8; k++) {
2917 block[k] = ac_val2[k + 8] * scale;
2918 if(!v->pquantizer && block[k])
2919 block[k] += (block[k] < 0) ? -mquant : mquant;
2920 }
2921 }
2922 i = 63;
2923 }
2924 }
2925 s->block_last_index[n] = i;
2926
2927 return 0;
2928}
2929
2930/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2931 * @param v VC1Context
2932 * @param block block to decode
2933 * @param coded are AC coeffs present or not
2934 * @param mquant block quantizer
2935 * @param codingset set of VLC to decode data
2936 */
2937static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int coded, int mquant, int codingset)
2938{
2939 GetBitContext *gb = &v->s.gb;
2940 MpegEncContext *s = &v->s;
2941 int dc_pred_dir = 0; /* Direction of the DC prediction used */
2942 int run_diff, i;
2943 int16_t *dc_val;
2944 int16_t *ac_val, *ac_val2;
2945 int dcdiff;
2946 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2947 int a_avail = v->a_avail, c_avail = v->c_avail;
2948 int use_pred = s->ac_pred;
2949 int scale;
2950 int q1, q2 = 0;
2951
2952 /* XXX: Guard against dumb values of mquant */
2953 mquant = (mquant < 1) ? 0 : ( (mquant>31) ? 31 : mquant );
2954
2955 /* Set DC scale - y and c use the same */
2956 s->y_dc_scale = s->y_dc_scale_table[mquant];
2957 s->c_dc_scale = s->c_dc_scale_table[mquant];
2958
2959 /* Get DC differential */
2960 if (n < 4) {
2961 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2962 } else {
2963 dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2964 }
2965 if (dcdiff < 0){
2966 av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2967 return -1;
2968 }
2969 if (dcdiff)
2970 {
2971 if (dcdiff == 119 /* ESC index value */)
2972 {
2973 /* TODO: Optimize */
2974 if (mquant == 1) dcdiff = get_bits(gb, 10);
2975 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2976 else dcdiff = get_bits(gb, 8);
2977 }
2978 else
2979 {
2980 if (mquant == 1)
2981 dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
2982 else if (mquant == 2)
2983 dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
2984 }
2985 if (get_bits(gb, 1))
2986 dcdiff = -dcdiff;
2987 }
2988
2989 /* Prediction */
2990 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2991 *dc_val = dcdiff;
2992
2993 /* Store the quantized DC coeff, used for prediction */
2994
2995 if (n < 4) {
2996 block[0] = dcdiff * s->y_dc_scale;
2997 } else {
2998 block[0] = dcdiff * s->c_dc_scale;
2999 }
3000 /* Skip ? */
3001 run_diff = 0;
3002 i = 0;
3003
3004 //AC Decoding
3005 i = 1;
3006
3007 /* check if AC is needed at all and adjust direction if needed */
3008 if(!a_avail) dc_pred_dir = 1;
3009 if(!c_avail) dc_pred_dir = 0;
3010 if(!a_avail && !c_avail) use_pred = 0;
3011 ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3012 ac_val2 = ac_val;
3013
3014 scale = mquant * 2 + v->halfpq;
3015
3016 if(dc_pred_dir) //left
3017 ac_val -= 16;
3018 else //top
3019 ac_val -= 16 * s->block_wrap[n];
3020
3021 q1 = s->current_picture.qscale_table[mb_pos];
3022 if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
3023 if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3024 if(n && n<4) q2 = q1;
3025
3026 if(coded) {
3027 int last = 0, skip, value;
3028 const int8_t *zz_table;
3029 int k;
3030
3031 zz_table = vc1_simple_progressive_8x8_zz;
3032
3033 while (!last) {
3034 vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3035 i += skip;
3036 if(i > 63)
3037 break;
3038 block[zz_table[i++]] = value;
3039 }
3040
3041 /* apply AC prediction if needed */
3042 if(use_pred) {
3043 /* scale predictors if needed*/
3044 if(q2 && q1!=q2) {
3045 q1 = q1 * 2 - 1;
3046 q2 = q2 * 2 - 1;
3047
3048 if(dc_pred_dir) { //left
3049 for(k = 1; k < 8; k++)
3050 block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3051 } else { //top
3052 for(k = 1; k < 8; k++)
3053 block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3054 }
3055 } else {
3056 if(dc_pred_dir) { //left
3057 for(k = 1; k < 8; k++)
3058 block[k << 3] += ac_val[k];
3059 } else { //top
3060 for(k = 1; k < 8; k++)
3061 block[k] += ac_val[k + 8];
3062 }
3063 }
3064 }
3065 /* save AC coeffs for further prediction */
3066 for(k = 1; k < 8; k++) {
3067 ac_val2[k] = block[k << 3];
3068 ac_val2[k + 8] = block[k];
3069 }
3070
3071 /* scale AC coeffs */
3072 for(k = 1; k < 64; k++)
3073 if(block[k]) {
3074 block[k] *= scale;
3075 if(!v->pquantizer)
3076 block[k] += (block[k] < 0) ? -mquant : mquant;
3077 }
3078
3079 if(use_pred) i = 63;
3080 } else { // no AC coeffs
3081 int k;
3082
3083 memset(ac_val2, 0, 16 * 2);
3084 if(dc_pred_dir) {//left
3085 if(use_pred) {
3086 memcpy(ac_val2, ac_val, 8 * 2);
3087 if(q2 && q1!=q2) {
3088 q1 = q1 * 2 - 1;
3089 q2 = q2 * 2 - 1;
3090 for(k = 1; k < 8; k++)
3091 ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3092 }
3093 }
3094 } else {//top
3095 if(use_pred) {
3096 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3097 if(q2 && q1!=q2) {
3098 q1 = q1 * 2 - 1;
3099 q2 = q2 * 2 - 1;
3100 for(k = 1; k < 8; k++)
3101 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3102 }
3103 }
3104 }
3105
3106 /* apply AC prediction if needed */
3107 if(use_pred) {
3108 if(dc_pred_dir) { //left
3109 for(k = 1; k < 8; k++) {
3110 block[k << 3] = ac_val2[k] * scale;
3111 if(!v->pquantizer && block[k << 3])
3112 block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
3113 }
3114 } else { //top
3115 for(k = 1; k < 8; k++) {
3116 block[k] = ac_val2[k + 8] * scale;
3117 if(!v->pquantizer && block[k])
3118 block[k] += (block[k] < 0) ? -mquant : mquant;
3119 }
3120 }
3121 i = 63;
3122 }
3123 }
3124 s->block_last_index[n] = i;
3125
3126 return 0;
3127}
3128
3129/** Decode P block
3130 */
/** Decode P block
 *
 * Reads the AC coefficients of one inter-coded block and applies the
 * inverse transform selected by the block transform type (TT): one 8x8,
 * two 8x4, two 4x8 or four 4x4 sub-transforms.
 * @param v       VC-1 decoder context
 * @param block   destination coefficient buffer; assumed cleared by the caller
 * @param n       block number inside the macroblock (kept for API symmetry)
 * @param mquant  quantizer scale for this macroblock
 * @param ttmb    MB-level transform type, or -1 when TT is signalled per block
 * @param first_block  nonzero for the first coded block of the macroblock
 * @return always 0 (no error reporting is performed here)
 */
static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquant, int ttmb, int first_block)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int subblkpat = 0;               /* bitmask of sub-blocks with NO coefficients (per loop tests below) */
    int scale, off, idx, last, skip, value;
    int ttblk = ttmb & 7;            /* transform type for this block: low 3 bits of MB-level TT */

    if(ttmb == -1) {
        /* TT signalled at block level: read it through the per-index VLC */
        ttblk = ttblk_to_tt[v->tt_index][get_vlc2(gb, vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
    }
    if(ttblk == TT_4X4) {
        /* VLC yields (pattern - 1); ~(x + 1) recovers the inverted 4-bit pattern */
        subblkpat = ~(get_vlc2(gb, vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
    }
    if((ttblk != TT_8X8 && ttblk != TT_4X4) && (v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))) {
        /* 8x4/4x8 with explicitly coded 2-bit sub-block pattern */
        subblkpat = decode012(gb);
        if(subblkpat) subblkpat ^= 3; //swap decoded pattern bits
        if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) ttblk = TT_8X4;
        if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) ttblk = TT_4X8;
    }
    scale = 2 * mquant + v->halfpq;

    // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
    if(ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
        subblkpat = 2 - (ttblk == TT_8X4_TOP);
        ttblk = TT_8X4;
    }
    if(ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
        subblkpat = 2 - (ttblk == TT_4X8_LEFT);
        ttblk = TT_4X8;
    }
    switch(ttblk) {
    case TT_8X8:
        i = 0;
        last = 0;
        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
            i += skip;
            if(i > 63)
                break;
            idx = vc1_simple_progressive_8x8_zz[i++];
            block[idx] = value * scale;
            if(!v->pquantizer)
                /* nonuniform quantizer: add back the quantizer-dependent bias */
                block[idx] += (block[idx] < 0) ? -mquant : mquant;
        }
        s->dsp.vc1_inv_trans_8x8(block);
        break;
    case TT_4X4:
        for(j = 0; j < 4; j++) {
            /* a set pattern bit means "no coefficients for this sub-block" */
            last = subblkpat & (1 << (3 - j));
            i = 0;
            off = (j & 1) * 4 + (j & 2) * 16;   /* 4x4 sub-block origin inside the 8x8 block */
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if(i > 15)
                    break;
                idx = vc1_simple_progressive_4x4_zz[i++];
                block[idx + off] = value * scale;
                if(!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if(!(subblkpat & (1 << (3 - j))))
                s->dsp.vc1_inv_trans_4x4(block, j);
        }
        break;
    case TT_8X4:
        for(j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i = 0;
            off = j * 32;                        /* bottom half starts at row 4 */
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if(i > 31)
                    break;
                /* simple/main and advanced profiles use different 8x4 zigzags */
                if(v->profile < PROFILE_ADVANCED)
                    idx = vc1_simple_progressive_8x4_zz[i++];
                else
                    idx = vc1_adv_progressive_8x4_zz[i++];
                block[idx + off] = value * scale;
                if(!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if(!(subblkpat & (1 << (1 - j))))
                s->dsp.vc1_inv_trans_8x4(block, j);
        }
        break;
    case TT_4X8:
        for(j = 0; j < 2; j++) {
            last = subblkpat & (1 << (1 - j));
            i = 0;
            off = j * 4;                         /* right half starts at column 4 */
            while (!last) {
                vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
                i += skip;
                if(i > 31)
                    break;
                if(v->profile < PROFILE_ADVANCED)
                    idx = vc1_simple_progressive_4x8_zz[i++];
                else
                    idx = vc1_adv_progressive_4x8_zz[i++];
                block[idx + off] = value * scale;
                if(!v->pquantizer)
                    block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
            }
            if(!(subblkpat & (1 << (1 - j))))
                s->dsp.vc1_inv_trans_4x8(block, j);
        }
        break;
    }
    return 0;
}
3245
3246
3247/** Decode one P-frame MB (in Simple/Main profile)
3248 */
3249static int vc1_decode_p_mb(VC1Context *v)
3250{
3251 MpegEncContext *s = &v->s;
3252 GetBitContext *gb = &s->gb;
3253 int i, j;
3254 int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3255 int cbp; /* cbp decoding stuff */
3256 int mqdiff, mquant; /* MB quantization */
3257 int ttmb = v->ttfrm; /* MB Transform type */
3258 int status;
3259
3260 static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
3261 offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3262 int mb_has_coeffs = 1; /* last_flag */
3263 int dmv_x, dmv_y; /* Differential MV components */
3264 int index, index1; /* LUT indices */
3265 int val, sign; /* temp values */
3266 int first_block = 1;
3267 int dst_idx, off;
3268 int skipped, fourmv;
3269
3270 mquant = v->pq; /* Loosy initialization */
3271
3272 if (v->mv_type_is_raw)
3273 fourmv = get_bits1(gb);
3274 else
3275 fourmv = v->mv_type_mb_plane[mb_pos];
3276 if (v->skip_is_raw)
3277 skipped = get_bits1(gb);
3278 else
3279 skipped = v->s.mbskip_table[mb_pos];
3280
3281 s->dsp.clear_blocks(s->block[0]);
3282
3283 if (!fourmv) /* 1MV mode */
3284 {
3285 if (!skipped)
3286 {
3287 GET_MVDATA(dmv_x, dmv_y);
3288
3289 if (s->mb_intra) {
3290 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3291 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3292 }
3293 s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3294 vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3295
3296 /* FIXME Set DC val for inter block ? */
3297 if (s->mb_intra && !mb_has_coeffs)
3298 {
3299 GET_MQUANT();
3300 s->ac_pred = get_bits(gb, 1);
3301 cbp = 0;
3302 }
3303 else if (mb_has_coeffs)
3304 {
3305 if (s->mb_intra) s->ac_pred = get_bits(gb, 1);
3306 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3307 GET_MQUANT();
3308 }
3309 else
3310 {
3311 mquant = v->pq;
3312 cbp = 0;
3313 }
3314 s->current_picture.qscale_table[mb_pos] = mquant;
3315
3316 if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3317 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table,
3318 VC1_TTMB_VLC_BITS, 2);
3319 if(!s->mb_intra) vc1_mc_1mv(v, 0);
3320 dst_idx = 0;
3321 for (i=0; i<6; i++)
3322 {
3323 s->dc_val[0][s->block_index[i]] = 0;
3324 dst_idx += i >> 2;
3325 val = ((cbp >> (5 - i)) & 1);
3326 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3327 v->mb_type[0][s->block_index[i]] = s->mb_intra;
3328 if(s->mb_intra) {
3329 /* check if prediction blocks A and C are available */
3330 v->a_avail = v->c_avail = 0;
3331 if(i == 2 || i == 3 || !s->first_slice_line)
3332 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3333 if(i == 1 || i == 3 || s->mb_x)
3334 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3335
3336 vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
3337 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3338 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3339 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3340 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3341 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
3342 if(v->pq >= 9 && v->overlap) {
3343 if(v->a_avail)
3344 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3345 if(v->c_avail)
3346 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3347 }
3348 } else if(val) {
3349 vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3350 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3351 first_block = 0;
3352 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3353 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3354 }
3355 }
3356 }
3357 else //Skipped
3358 {
3359 s->mb_intra = 0;
3360 for(i = 0; i < 6; i++) {
3361 v->mb_type[0][s->block_index[i]] = 0;
3362 s->dc_val[0][s->block_index[i]] = 0;
3363 }
3364 s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3365 s->current_picture.qscale_table[mb_pos] = 0;
3366 vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3367 vc1_mc_1mv(v, 0);
3368 return 0;
3369 }
3370 } //1MV mode
3371 else //4MV mode
3372 {
3373 if (!skipped /* unskipped MB */)
3374 {
3375 int intra_count = 0, coded_inter = 0;
3376 int is_intra[6], is_coded[6];
3377 /* Get CBPCY */
3378 cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3379 for (i=0; i<6; i++)
3380 {
3381 val = ((cbp >> (5 - i)) & 1);
3382 s->dc_val[0][s->block_index[i]] = 0;
3383 s->mb_intra = 0;
3384 if(i < 4) {
3385 dmv_x = dmv_y = 0;
3386 s->mb_intra = 0;
3387 mb_has_coeffs = 0;
3388 if(val) {
3389 GET_MVDATA(dmv_x, dmv_y);
3390 }
3391 vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3392 if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
3393 intra_count += s->mb_intra;
3394 is_intra[i] = s->mb_intra;
3395 is_coded[i] = mb_has_coeffs;
3396 }
3397 if(i&4){
3398 is_intra[i] = (intra_count >= 3);
3399 is_coded[i] = val;
3400 }
3401 if(i == 4) vc1_mc_4mv_chroma(v);
3402 v->mb_type[0][s->block_index[i]] = is_intra[i];
3403 if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
3404 }
3405 // if there are no coded blocks then don't do anything more
3406 if(!intra_count && !coded_inter) return 0;
3407 dst_idx = 0;
3408 GET_MQUANT();
3409 s->current_picture.qscale_table[mb_pos] = mquant;
3410 /* test if block is intra and has pred */
3411 {
3412 int intrapred = 0;
3413 for(i=0; i<6; i++)
3414 if(is_intra[i]) {
3415 if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3416 || ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
3417 intrapred = 1;
3418 break;
3419 }
3420 }
3421 if(intrapred)s->ac_pred = get_bits(gb, 1);
3422 else s->ac_pred = 0;
3423 }
3424 if (!v->ttmbf && coded_inter)
3425 ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3426 for (i=0; i<6; i++)
3427 {
3428 dst_idx += i >> 2;
3429 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3430 s->mb_intra = is_intra[i];
3431 if (is_intra[i]) {
3432 /* check if prediction blocks A and C are available */
3433 v->a_avail = v->c_avail = 0;
3434 if(i == 2 || i == 3 || !s->first_slice_line)
3435 v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3436 if(i == 1 || i == 3 || s->mb_x)
3437 v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3438
3439 vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
3440 if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3441 s->dsp.vc1_inv_trans_8x8(s->block[i]);
3442 if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
3443 for(j = 0; j < 64; j++) s->block[i][j] += 128;
3444 s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3445 if(v->pq >= 9 && v->overlap) {
3446 if(v->a_avail)
3447 s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
3448 if(v->c_avail)
3449 s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
3450 }
3451 } else if(is_coded[i]) {
3452 status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
3453 if(!v->ttmbf && ttmb < 8) ttmb = -1;
3454 first_block = 0;
3455 if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
3456 s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
3457 }
3458 }
3459 return status;
3460 }
3461 else //Skipped MB
3462 {
3463 s->mb_intra = 0;
3464 s->current_picture.qscale_table[mb_pos] = 0;
3465 for (i=0; i<6; i++) {
3466 v->mb_type[0][s->block_index[i]] = 0;
3467 s->dc_val[0][s->block_index[i]] = 0;
3468 }
3469 for (i=0; i<4; i++)
3470 {
3471 vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
3472 vc1_mc_4mv_luma(v, i);
3473 }
3474 vc1_mc_4mv_chroma(v);
3475 s->current_picture.qscale_table[mb_pos] = 0;
3476 return 0;
3477 }
3478 }
3479
3480 /* Should never happen */
3481 return -1;
3482}
3483
3484/** Decode one B-frame MB (in Main profile)
3485 */
/** Decode one B-frame MB (in Main profile)
 *
 * Decodes direct/skip flags, B motion vector type, motion data, CBP,
 * quantizer and block coefficients for one macroblock of a B frame, then
 * reconstructs it via vc1_pred_b_mv()/vc1_b_mc() and the block decoders.
 */
static void vc1_decode_b_mb(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    GetBitContext *gb = &s->gb;
    int i, j;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp = 0; /* cbp decoding stuff */
    int mqdiff, mquant; /* MB quantization */
    int ttmb = v->ttfrm; /* MB Transform type */

    static const int size_table[6] = { 0, 2, 3, 4, 5, 8 },
      offset_table[6] = { 0, 1, 3, 7, 15, 31 };
    int mb_has_coeffs = 0; /* last_flag */
    int index, index1; /* LUT indices */
    int val, sign; /* temp values */
    int first_block = 1;
    int dst_idx, off;
    int skipped, direct;
    int dmv_x[2], dmv_y[2];   /* differential MVs: [0] forward, [1] backward */
    int bmvtype = BMV_TYPE_BACKWARD;

    mquant = v->pq; /* Loosy initialization */
    s->mb_intra = 0;

    /* direct and skip flags come either raw from the bitstream or from
     * the previously decoded bitplanes */
    if (v->dmb_is_raw)
        direct = get_bits1(gb);
    else
        direct = v->direct_mb_plane[mb_pos];
    if (v->skip_is_raw)
        skipped = get_bits1(gb);
    else
        skipped = v->s.mbskip_table[mb_pos];

    s->dsp.clear_blocks(s->block[0]);
    dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
    for(i = 0; i < 6; i++) {
        v->mb_type[0][s->block_index[i]] = 0;
        s->dc_val[0][s->block_index[i]] = 0;
    }
    s->current_picture.qscale_table[mb_pos] = 0;

    if (!direct) {
        if (!skipped) {
            /* NOTE: GET_MVDATA also updates s->mb_intra and mb_has_coeffs */
            GET_MVDATA(dmv_x[0], dmv_y[0]);
            dmv_x[1] = dmv_x[0];
            dmv_y[1] = dmv_y[0];
        }
        if(skipped || !s->mb_intra) {
            /* map the 0/1/2 code to a prediction direction; which direction
             * code 0/1 means depends on the B fraction */
            bmvtype = decode012(gb);
            switch(bmvtype) {
            case 0:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
                break;
            case 1:
                bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
                break;
            case 2:
                bmvtype = BMV_TYPE_INTERPOLATED;
                dmv_x[0] = dmv_y[0] = 0;
            }
        }
    }
    for(i = 0; i < 6; i++)
        v->mb_type[0][s->block_index[i]] = s->mb_intra;

    if (skipped) {
        /* skipped MB: predict MVs and motion-compensate, no residual */
        if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
        return;
    }
    if (direct) {
        /* direct MB: MVs derived from the co-located P MB, only CBP coded */
        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
        GET_MQUANT();
        s->mb_intra = 0;
        mb_has_coeffs = 0;
        s->current_picture.qscale_table[mb_pos] = mquant;
        if(!v->ttmbf)
            ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
        vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
    } else {
        if(!mb_has_coeffs && !s->mb_intra) {
            /* no coded blocks - effectively skipped */
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            return;
        }
        if(s->mb_intra && !mb_has_coeffs) {
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            s->ac_pred = get_bits1(gb);
            cbp = 0;
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
        } else {
            if(bmvtype == BMV_TYPE_INTERPOLATED) {
                /* interpolated MBs carry a second (backward) MV */
                GET_MVDATA(dmv_x[0], dmv_y[0]);
                if(!mb_has_coeffs) {
                    /* interpolated skipped block */
                    vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
                    vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
                    return;
                }
            }
            vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
            if(!s->mb_intra) {
                vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
            }
            if(s->mb_intra)
                s->ac_pred = get_bits1(gb);
            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
            GET_MQUANT();
            s->current_picture.qscale_table[mb_pos] = mquant;
            if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
                ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
        }
    }
    dst_idx = 0;
    /* decode and reconstruct the six blocks (4 luma + 2 chroma) */
    for (i=0; i<6; i++)
    {
        s->dc_val[0][s->block_index[i]] = 0;
        dst_idx += i >> 2;
        val = ((cbp >> (5 - i)) & 1);
        off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
        v->mb_type[0][s->block_index[i]] = s->mb_intra;
        if(s->mb_intra) {
            /* check if prediction blocks A and C are available */
            v->a_avail = v->c_avail = 0;
            if(i == 2 || i == 3 || !s->first_slice_line)
                v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
            if(i == 1 || i == 3 || s->mb_x)
                v->c_avail = v->mb_type[0][s->block_index[i] - 1];

            vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
            if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
            s->dsp.vc1_inv_trans_8x8(s->block[i]);
            if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
            for(j = 0; j < 64; j++) s->block[i][j] += 128;
            s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
        } else if(val) {
            vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
            if(!v->ttmbf && ttmb < 8) ttmb = -1;
            first_block = 0;
            if((i<4) || !(s->flags & CODEC_FLAG_GRAY))
                s->dsp.add_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
        }
    }
}
3635
3636/** Decode blocks of I-frame
3637 */
3638static void vc1_decode_i_blocks(VC1Context *v)
3639{
3640 int k, j;
3641 MpegEncContext *s = &v->s;
3642 int cbp, val;
3643 uint8_t *coded_val;
3644 int mb_pos;
3645
3646 /* select codingmode used for VLC tables selection */
3647 switch(v->y_ac_table_index){
3648 case 0:
3649 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3650 break;
3651 case 1:
3652 v->codingset = CS_HIGH_MOT_INTRA;
3653 break;
3654 case 2:
3655 v->codingset = CS_MID_RATE_INTRA;
3656 break;
3657 }
3658
3659 switch(v->c_ac_table_index){
3660 case 0:
3661 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3662 break;
3663 case 1:
3664 v->codingset2 = CS_HIGH_MOT_INTER;
3665 break;
3666 case 2:
3667 v->codingset2 = CS_MID_RATE_INTER;
3668 break;
3669 }
3670
3671 /* Set DC scale - y and c use the same */
3672 s->y_dc_scale = s->y_dc_scale_table[v->pq];
3673 s->c_dc_scale = s->c_dc_scale_table[v->pq];
3674
3675 //do frame decode
3676 s->mb_x = s->mb_y = 0;
3677 s->mb_intra = 1;
3678 s->first_slice_line = 1;
3679 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3680 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3681 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3682 ff_init_block_index(s);
3683 ff_update_block_index(s);
3684 s->dsp.clear_blocks(s->block[0]);
3685 mb_pos = s->mb_x + s->mb_y * s->mb_width;
3686 s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3687 s->current_picture.qscale_table[mb_pos] = v->pq;
3688 s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3689 s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3690
3691 // do actual MB decoding and displaying
3692 cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
3693 v->s.ac_pred = get_bits(&v->s.gb, 1);
3694
3695 for(k = 0; k < 6; k++) {
3696 val = ((cbp >> (5 - k)) & 1);
3697
3698 if (k < 4) {
3699 int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
3700 val = val ^ pred;
3701 *coded_val = val;
3702 }
3703 cbp |= val << (5 - k);
3704
3705 vc1_decode_i_block(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2);
3706
3707 s->dsp.vc1_inv_trans_8x8(s->block[k]);
3708 if(v->pq >= 9 && v->overlap) {
3709 for(j = 0; j < 64; j++) s->block[k][j] += 128;
3710 }
3711 }
3712
3713 vc1_put_block(v, s->block);
3714 if(v->pq >= 9 && v->overlap) {
3715 if(!s->first_slice_line) {
3716 s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
3717 s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
3718 if(!(s->flags & CODEC_FLAG_GRAY)) {
3719 s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
3720 s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
3721 }
3722 }
3723 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
3724 s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3725 if(s->mb_x) {
3726 s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
3727 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
3728 if(!(s->flags & CODEC_FLAG_GRAY)) {
3729 s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
3730 s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
3731 }
3732 }
3733 s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
3734 s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
3735 }
3736
3737 if(get_bits_count(&s->gb) > v->bits) {
3738 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
3739 return;
3740 }
3741 }
3742 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3743 s->first_slice_line = 0;
3744 }
3745}
3746
3747/** Decode blocks of I-frame for advanced profile
3748 */
/** Decode blocks of I-frame for advanced profile
 *
 * Same structure as vc1_decode_i_blocks() but with advanced-profile
 * extensions: per-MB AC prediction / overlap bitplanes, conditional
 * overlap (CONDOVER) and per-MB quantizer (GET_MQUANT).
 */
static void vc1_decode_i_blocks_adv(VC1Context *v)
{
    int k, j;
    MpegEncContext *s = &v->s;
    int cbp, val;
    uint8_t *coded_val;
    int mb_pos;
    int mquant = v->pq;
    int mqdiff;          /* used by GET_MQUANT() */
    int overlap;         /* per-MB overlap filter decision */
    GetBitContext *gb = &s->gb;

    /* select codingmode used for VLC tables selection */
    switch(v->y_ac_table_index){
    case 0:
        v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
        break;
    case 1:
        v->codingset = CS_HIGH_MOT_INTRA;
        break;
    case 2:
        v->codingset = CS_MID_RATE_INTRA;
        break;
    }

    switch(v->c_ac_table_index){
    case 0:
        v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
        break;
    case 1:
        v->codingset2 = CS_HIGH_MOT_INTER;
        break;
    case 2:
        v->codingset2 = CS_MID_RATE_INTER;
        break;
    }

    /* Set DC scale - y and c use the same */
    s->y_dc_scale = s->y_dc_scale_table[v->pq];
    s->c_dc_scale = s->c_dc_scale_table[v->pq];

    //do frame decode
    s->mb_x = s->mb_y = 0;
    s->mb_intra = 1;
    s->first_slice_line = 1;
    ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
    for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            ff_init_block_index(s);
            ff_update_block_index(s);
            s->dsp.clear_blocks(s->block[0]);
            mb_pos = s->mb_x + s->mb_y * s->mb_stride;
            s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
            s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0]][1] = 0;

            // do actual MB decoding and displaying
            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
            /* AC prediction flag: raw from bitstream or from decoded bitplane */
            if(v->acpred_is_raw)
                v->s.ac_pred = get_bits(&v->s.gb, 1);
            else
                v->s.ac_pred = v->acpred_plane[mb_pos];

            /* overlap decision: per-MB flag, all MBs, or none */
            if(v->condover == CONDOVER_SELECT) {
                if(v->overflg_is_raw)
                    overlap = get_bits(&v->s.gb, 1);
                else
                    overlap = v->over_flags_plane[mb_pos];
            } else
                overlap = (v->condover == CONDOVER_ALL);

            GET_MQUANT();

            s->current_picture.qscale_table[mb_pos] = mquant;

            for(k = 0; k < 6; k++) {
                val = ((cbp >> (5 - k)) & 1);

                if (k < 4) {
                    /* luma coded-block flags are predicted from neighbors */
                    int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
                    val = val ^ pred;
                    *coded_val = val;
                }
                cbp |= val << (5 - k);

                /* A (above) / C (left) predictor availability for this block */
                v->a_avail = !s->first_slice_line || (k==2 || k==3);
                v->c_avail = !!s->mb_x || (k==1 || k==3);

                vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);

                s->dsp.vc1_inv_trans_8x8(s->block[k]);
                for(j = 0; j < 64; j++) s->block[k][j] += 128;
            }

            vc1_put_block(v, s->block);
            if(overlap) {
                /* overlap smoothing across the top and left MB edges */
                if(!s->first_slice_line) {
                    s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
                    s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
                        s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
                    }
                }
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
                s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
                if(s->mb_x) {
                    s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
                    s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
                    if(!(s->flags & CODEC_FLAG_GRAY)) {
                        s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
                        s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
                    }
                }
                s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
                s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
            }

            if(get_bits_count(&s->gb) > v->bits) {
                av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
                return;
            }
        }
        ff_draw_horiz_band(s, s->mb_y * 16, 16);
        s->first_slice_line = 0;
    }
}
3876
3877static void vc1_decode_p_blocks(VC1Context *v)
3878{
3879 MpegEncContext *s = &v->s;
3880
3881 /* select codingmode used for VLC tables selection */
3882 switch(v->c_ac_table_index){
3883 case 0:
3884 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3885 break;
3886 case 1:
3887 v->codingset = CS_HIGH_MOT_INTRA;
3888 break;
3889 case 2:
3890 v->codingset = CS_MID_RATE_INTRA;
3891 break;
3892 }
3893
3894 switch(v->c_ac_table_index){
3895 case 0:
3896 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3897 break;
3898 case 1:
3899 v->codingset2 = CS_HIGH_MOT_INTER;
3900 break;
3901 case 2:
3902 v->codingset2 = CS_MID_RATE_INTER;
3903 break;
3904 }
3905
3906 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3907 s->first_slice_line = 1;
3908 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3909 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3910 ff_init_block_index(s);
3911 ff_update_block_index(s);
3912 s->dsp.clear_blocks(s->block[0]);
3913
3914 vc1_decode_p_mb(v);
3915 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3916 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3917 return;
3918 }
3919 }
3920 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3921 s->first_slice_line = 0;
3922 }
3923}
3924
3925static void vc1_decode_b_blocks(VC1Context *v)
3926{
3927 MpegEncContext *s = &v->s;
3928
3929 /* select codingmode used for VLC tables selection */
3930 switch(v->c_ac_table_index){
3931 case 0:
3932 v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
3933 break;
3934 case 1:
3935 v->codingset = CS_HIGH_MOT_INTRA;
3936 break;
3937 case 2:
3938 v->codingset = CS_MID_RATE_INTRA;
3939 break;
3940 }
3941
3942 switch(v->c_ac_table_index){
3943 case 0:
3944 v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
3945 break;
3946 case 1:
3947 v->codingset2 = CS_HIGH_MOT_INTER;
3948 break;
3949 case 2:
3950 v->codingset2 = CS_MID_RATE_INTER;
3951 break;
3952 }
3953
3954 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3955 s->first_slice_line = 1;
3956 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3957 for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
3958 ff_init_block_index(s);
3959 ff_update_block_index(s);
3960 s->dsp.clear_blocks(s->block[0]);
3961
3962 vc1_decode_b_mb(v);
3963 if(get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
3964 av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n", get_bits_count(&s->gb), v->bits,s->mb_x,s->mb_y);
3965 return;
3966 }
3967 }
3968 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3969 s->first_slice_line = 0;
3970 }
3971}
3972
3973static void vc1_decode_skip_blocks(VC1Context *v)
3974{
3975 MpegEncContext *s = &v->s;
3976
3977 ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
3978 s->first_slice_line = 1;
3979 for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
3980 s->mb_x = 0;
3981 ff_init_block_index(s);
3982 ff_update_block_index(s);
3983 memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
3984 memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3985 memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
3986 ff_draw_horiz_band(s, s->mb_y * 16, 16);
3987 s->first_slice_line = 0;
3988 }
3989 s->pict_type = P_TYPE;
3990}
3991
3992static void vc1_decode_blocks(VC1Context *v)
3993{
3994
3995 v->s.esc3_level_length = 0;
3996
3997 switch(v->s.pict_type) {
3998 case I_TYPE:
3999 if(v->profile == PROFILE_ADVANCED)
4000 vc1_decode_i_blocks_adv(v);
4001 else
4002 vc1_decode_i_blocks(v);
4003 break;
4004 case P_TYPE:
4005 if(v->p_frame_skipped)
4006 vc1_decode_skip_blocks(v);
4007 else
4008 vc1_decode_p_blocks(v);
4009 break;
4010 case B_TYPE:
4011 if(v->bi_type)
4012 vc1_decode_i_blocks(v);
4013 else
4014 vc1_decode_b_blocks(v);
4015 break;
4016 }
4017}
4018
4019
4020/** Initialize a VC1/WMV3 decoder
4021 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4022 * @todo TODO: Decypher remaining bits in extra_data
4023 */
4024static int vc1_decode_init(AVCodecContext *avctx)
4025{
4026 VC1Context *v = avctx->priv_data;
4027 MpegEncContext *s = &v->s;
4028 GetBitContext gb;
4029
4030 if (!avctx->extradata_size || !avctx->extradata) return -1;
4031 if (!(avctx->flags & CODEC_FLAG_GRAY))
4032 avctx->pix_fmt = PIX_FMT_YUV420P;
4033 else
4034 avctx->pix_fmt = PIX_FMT_GRAY8;
4035 v->s.avctx = avctx;
4036 avctx->flags |= CODEC_FLAG_EMU_EDGE;
4037 v->s.flags |= CODEC_FLAG_EMU_EDGE;
4038
4039 if(ff_h263_decode_init(avctx) < 0)
4040 return -1;
4041 if (vc1_init_common(v) < 0) return -1;
4042
4043 avctx->coded_width = avctx->width;
4044 avctx->coded_height = avctx->height;
4045 if (avctx->codec_id == CODEC_ID_WMV3)
4046 {
4047 int count = 0;
4048
4049 // looks like WMV3 has a sequence header stored in the extradata
4050 // advanced sequence header may be before the first frame
4051 // the last byte of the extradata is a version number, 1 for the
4052 // samples we can decode
4053
4054 init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
4055
4056 if (decode_sequence_header(avctx, &gb) < 0)
4057 return -1;
4058
4059 count = avctx->extradata_size*8 - get_bits_count(&gb);
4060 if (count>0)
4061 {
4062 av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
4063 count, get_bits(&gb, count));
4064 }
4065 else if (count < 0)
4066 {
4067 av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
4068 }
4069 } else { // VC1/WVC1
4070 int edata_size = avctx->extradata_size;
4071 uint8_t *edata = avctx->extradata;
4072
4073 if(avctx->extradata_size < 16) {
4074 av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
4075 return -1;
4076 }
4077 while(edata_size > 8) {
4078 // test if we've found header
4079 if(BE_32(edata) == 0x0000010F) {
4080 edata += 4;
4081 edata_size -= 4;
4082 break;
4083 }
4084 edata_size--;
4085 edata++;
4086 }
4087
4088 init_get_bits(&gb, edata, edata_size*8);
4089
4090 if (decode_sequence_header(avctx, &gb) < 0)
4091 return -1;
4092
4093 while(edata_size > 8) {
4094 // test if we've found entry point
4095 if(BE_32(edata) == 0x0000010E) {
4096 edata += 4;
4097 edata_size -= 4;
4098 break;
4099 }
4100 edata_size--;
4101 edata++;
4102 }
4103
4104 init_get_bits(&gb, edata, edata_size*8);
4105
4106 if (decode_entry_point(avctx, &gb) < 0)
4107 return -1;
4108 }
4109 avctx->has_b_frames= !!(avctx->max_b_frames);
4110 s->low_delay = !avctx->has_b_frames;
4111
4112 s->mb_width = (avctx->coded_width+15)>>4;
4113 s->mb_height = (avctx->coded_height+15)>>4;
4114
4115 /* Allocate mb bitplanes */
4116 v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4117 v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
4118 v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
4119 v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
4120
4121 /* allocate block type info in that way so it could be used with s->block_index[] */
4122 v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
4123 v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
4124 v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
4125 v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
4126
4127 /* Init coded blocks info */
4128 if (v->profile == PROFILE_ADVANCED)
4129 {
4130// if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
4131// return -1;
4132// if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
4133// return -1;
4134 }
4135
4136 return 0;
4137}
4138
4139
4140/** Decode a VC1/WMV3 frame
4141 * @todo TODO: Handle VC-1 IDUs (Transport level?)
4142 */
4143static int vc1_decode_frame(AVCodecContext *avctx,
4144 void *data, int *data_size,
4145 uint8_t *buf, int buf_size)
4146{
4147 VC1Context *v = avctx->priv_data;
4148 MpegEncContext *s = &v->s;
4149 AVFrame *pict = data;
4150 uint8_t *buf2 = NULL;
4151
4152 /* no supplementary picture */
4153 if (buf_size == 0) {
4154 /* special case for last picture */
4155 if (s->low_delay==0 && s->next_picture_ptr) {
4156 *pict= *(AVFrame*)s->next_picture_ptr;
4157 s->next_picture_ptr= NULL;
4158
4159 *data_size = sizeof(AVFrame);
4160 }
4161
4162 return 0;
4163 }
4164
4165 //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there
4166 if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
4167 int i= ff_find_unused_picture(s, 0);
4168 s->current_picture_ptr= &s->picture[i];
4169 }
4170
4171 //for advanced profile we need to unescape buffer
4172 if (avctx->codec_id == CODEC_ID_VC1) {
4173 int i, buf_size2;
4174 buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
4175 buf_size2 = 0;
4176 for(i = 0; i < buf_size; i++) {
4177 if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
4178 buf2[buf_size2++] = buf[i+1];
4179 i++;
4180 } else
4181 buf2[buf_size2++] = buf[i];
4182 }
4183 init_get_bits(&s->gb, buf2, buf_size2*8);
4184 } else
4185 init_get_bits(&s->gb, buf, buf_size*8);
4186 // do parse frame header
4187 if(v->profile < PROFILE_ADVANCED) {
4188 if(vc1_parse_frame_header(v, &s->gb) == -1) {
4189 av_free(buf2);
4190 return -1;
4191 }
4192 } else {
4193 if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
4194 av_free(buf2);
4195 return -1;
4196 }
4197 }
4198
4199 if(s->pict_type != I_TYPE && !v->res_rtm_flag){
4200 av_free(buf2);
4201 return -1;
4202 }
4203
4204 // for hurry_up==5
4205 s->current_picture.pict_type= s->pict_type;
4206 s->current_picture.key_frame= s->pict_type == I_TYPE;
4207
4208 /* skip B-frames if we don't have reference frames */
4209 if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
4210 av_free(buf2);
4211 return -1;//buf_size;
4212 }
4213 /* skip b frames if we are in a hurry */
4214 if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
4215 if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
4216 || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
4217 || avctx->skip_frame >= AVDISCARD_ALL) {
4218 av_free(buf2);
4219 return buf_size;
4220 }
4221 /* skip everything if we are in a hurry>=5 */
4222 if(avctx->hurry_up>=5) {
4223 av_free(buf2);
4224 return -1;//buf_size;
4225 }
4226
4227 if(s->next_p_frame_damaged){
4228 if(s->pict_type==B_TYPE)
4229 return buf_size;
4230 else
4231 s->next_p_frame_damaged=0;
4232 }
4233
4234 if(MPV_frame_start(s, avctx) < 0) {
4235 av_free(buf2);
4236 return -1;
4237 }
4238
4239 ff_er_frame_start(s);
4240
4241 v->bits = buf_size * 8;
4242 vc1_decode_blocks(v);
4243//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8);
4244// if(get_bits_count(&s->gb) > buf_size * 8)
4245// return -1;
4246 ff_er_frame_end(s);
4247
4248 MPV_frame_end(s);
4249
4250assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
4251assert(s->current_picture.pict_type == s->pict_type);
4252 if (s->pict_type == B_TYPE || s->low_delay) {
4253 *pict= *(AVFrame*)s->current_picture_ptr;
4254 } else if (s->last_picture_ptr != NULL) {
4255 *pict= *(AVFrame*)s->last_picture_ptr;
4256 }
4257
4258 if(s->last_picture_ptr || s->low_delay){
4259 *data_size = sizeof(AVFrame);
4260 ff_print_debug_info(s, pict);
4261 }
4262
4263 /* Return the Picture timestamp as the frame number */
4264 /* we substract 1 because it is added on utils.c */
4265 avctx->frame_number = s->picture_number - 1;
4266
4267 av_free(buf2);
4268 return buf_size;
4269}
4270
4271
4272/** Close a VC1/WMV3 decoder
4273 * @warning Initial try at using MpegEncContext stuff
4274 */
4275static int vc1_decode_end(AVCodecContext *avctx)
4276{
4277 VC1Context *v = avctx->priv_data;
4278
4279 av_freep(&v->hrd_rate);
4280 av_freep(&v->hrd_buffer);
4281 MPV_common_end(&v->s);
4282 av_freep(&v->mv_type_mb_plane);
4283 av_freep(&v->direct_mb_plane);
4284 av_freep(&v->acpred_plane);
4285 av_freep(&v->over_flags_plane);
4286 av_freep(&v->mb_type_base);
4287 return 0;
4288}
4289
4290
/** AVCodec registration entry for the VC-1 (advanced profile) decoder.
 *  Positional initializer; slot meanings follow the AVCodec declaration
 *  of this FFmpeg vintage — confirm against avcodec.h. */
AVCodec vc1_decoder = {
    "vc1",                 /* codec name */
    CODEC_TYPE_VIDEO,
    CODEC_ID_VC1,
    sizeof(VC1Context),    /* priv_data size */
    vc1_decode_init,
    NULL,                  /* no encoder entry point */
    vc1_decode_end,
    vc1_decode_frame,
    CODEC_CAP_DELAY,       /* decoder may delay output (frame reordering) */
    NULL
};
4303
/** AVCodec registration entry for the WMV3 (WMV9) decoder.
 *  Shares all entry points with the VC-1 decoder above; only the codec
 *  id/name differ. Positional initializer — confirm slot meanings
 *  against the AVCodec declaration in avcodec.h. */
AVCodec wmv3_decoder = {
    "wmv3",                /* codec name */
    CODEC_TYPE_VIDEO,
    CODEC_ID_WMV3,
    sizeof(VC1Context),    /* priv_data size */
    vc1_decode_init,
    NULL,                  /* no encoder entry point */
    vc1_decode_end,
    vc1_decode_frame,
    CODEC_CAP_DELAY,       /* decoder may delay output (frame reordering) */
    NULL
};
Note: See TracBrowser for help on using the repository browser.