{
DVVideoDecodeContext *s = avctx->priv_data;
MpegEncContext s2;
- static int done;
+ static int done=0;
if (!done) {
int i;
static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
int n, int coded, int intra);
static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr);
+#ifdef CONFIG_ENCODERS
static void mpeg4_inv_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir);
+#endif //CONFIG_ENCODERS
static void mpeg4_decode_sprite_trajectory(MpegEncContext * s);
static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr, int *dir_ptr);
extern uint32_t inverse[256];
+#ifdef CONFIG_ENCODERS
static uint8_t uni_DCtab_lum_len[512];
static uint8_t uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];
-#ifdef CONFIG_ENCODERS
static uint16_t (*mv_penalty)[MAX_MV*2+1]= NULL;
static uint8_t fcode_tab[MAX_MV*2+1];
static uint8_t umv_fcode_tab[MAX_MV*2+1];
return format;
}
+#ifdef CONFIG_ENCODERS
+
static void float_aspect_to_info(MpegEncContext * s, float aspect){
int i;
}
}
+#endif //CONFIG_ENCODERS
+
void ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_width;
int xy= s->block_index[0];
return pred_dc;
}
-
static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
int x, y, wrap, a, c, pred_dc, scale, i;
}
#endif
+#ifdef CONFIG_ENCODERS
+
static void init_uni_dc_tab(void)
{
int level, uni_code, uni_len;
}
}
+#endif //CONFIG_ENCODERS
+
#ifdef CONFIG_ENCODERS
static void init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab){
int slevel, run, last;
}
#endif
+#ifdef CONFIG_ENCODERS
+
/***************************************************/
/**
* add mpeg4 stuffing bits (01...1)
s->v_edge_pos= s->height;
}
+#endif //CONFIG_ENCODERS
+
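/* A minimal sketch of the "01...1" stuffing described above: one zero bit
 * followed by one bits up to the next byte boundary.  put_bits() and
 * get_bit_count() are the bitstream helpers already used elsewhere in this
 * patch; the function name here is only illustrative. */
static void mpeg4_stuffing_sketch(PutBitContext *pbc)
{
    int pad;

    put_bits(pbc, 1, 0);                    /* the single '0' bit           */
    pad = (-get_bit_count(pbc)) & 7;        /* bits left to the byte edge   */
    if (pad)
        put_bits(pbc, pad, (1 << pad) - 1); /* fill with '1' bits           */
}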
/**
 * change qscale by given dquant and update qscale dependent variables.
*/
}
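/* A rough sketch of what "update qscale dependent variables" involves
 * (assumed here, not the exact body above): clamp the new qscale to the
 * legal 1..31 range and refresh the DC scale factors derived from it. */
static void set_qscale_sketch(MpegEncContext *s, int dquant)
{
    int qscale = s->qscale + dquant;

    if (qscale < 1)  qscale = 1;
    if (qscale > 31) qscale = 31;
    s->qscale = qscale;

    /* y_dc_scale_table / c_dc_scale_table are the per-qscale tables used
     * elsewhere in mpegvideo.c */
    s->y_dc_scale = s->y_dc_scale_table[qscale];
    s->c_dc_scale = s->c_dc_scale_table[qscale];
}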
+#ifdef CONFIG_ENCODERS
+
static void mpeg4_inv_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir)
{
}
#endif
}
-#ifdef CONFIG_ENCODERS
+
/**
 * encodes an 8x8 block
* @param n block index (0-3 are luma, 4-5 are chroma)
tab[i] = val;
}
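/* For the @param n documented above, the standard 4:2:0 macroblock block
 * order (noted here for reference) is:
 *   +---+---+
 *   | 0 | 1 |  luma: the 16x16 area split into four 8x8 blocks
 *   +---+---+
 *   | 2 | 3 |
 *   +---+---+  4 = Cb (8x8), 5 = Cr (8x8)
 */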
+#ifdef CONFIG_ENCODERS
+
void ff_mpeg4_init_partitions(MpegEncContext *s)
{
init_put_bits(&s->tex_pb, s->tex_pb_buffer, PB_BUFFER_SIZE, NULL, NULL);
s->last_bits= get_bit_count(&s->pb);
}
+#endif //CONFIG_ENCODERS
+
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s){
switch(s->pict_type){
case I_TYPE:
}
}
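/* The cases elided above follow the MPEG-4 resync-marker rule: the marker is
 * a run of zero bits (the "prefix") followed by a single one bit, and the run
 * length grows with the motion-vector f_code.  A sketch of that rule, not
 * necessarily the exact elided body; S_TYPE and b_code are assumed to be the
 * GMC picture type and backward f_code used elsewhere in libavcodec. */
static int video_packet_prefix_length_sketch(MpegEncContext *s)
{
    switch (s->pict_type) {
    case I_TYPE:
        return 16;                          /* 16 zero bits for intra VOPs  */
    case P_TYPE:
    case S_TYPE:
        return s->f_code + 15;              /* longer prefix for larger MVs */
    case B_TYPE: {
        int f = s->f_code > s->b_code ? s->f_code : s->b_code;
        return f + 15 > 17 ? f + 15 : 17;   /* at least 17 for B-VOPs       */
    }
    default:
        return -1;
    }
}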
+#ifdef CONFIG_ENCODERS
+
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
{
int mb_num_bits= av_log2(s->mb_num - 1) + 1;
put_bits(&s->pb, 1, 0); /* no HEC */
}
+#endif //CONFIG_ENCODERS
+
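/* Worked example for the mb_num_bits computation above: the macroblock
 * number is written with just enough bits to address mb_num-1.  For a CIF
 * VOP, mb_num = 22*18 = 396, av_log2(395) = 8, so 9 bits are used. */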
/**
 * check if what follows is a resync marker or the end.
* @return 0 if not
s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
}
+#ifdef CONFIG_ENCODERS
void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
{
mpeg1_encode_sequence_header(s);
else
    { // No coded block pattern
if (s->mv_dir == (MV_DIR_FORWARD | MV_DIR_BACKWARD))
- { // Bi-directional motion
+ { // Bi-directional motion
put_bits(&s->pb, 2, 2); /* backward & forward motion */
mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
/* end of block */
put_bits(&s->pb, 2, 0x2);
}
+#endif //CONFIG_ENCODERS
/******************************************/
/* decoding */
//#undef NDEBUG
//#include <assert.h>
+#ifdef CONFIG_ENCODERS
static void encode_picture(MpegEncContext *s, int picture_number);
+#endif //CONFIG_ENCODERS
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_c(MpegEncContext *s,
static void dct_unquantize_h263_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
+#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
+#endif //CONFIG_ENCODERS
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
};
+#ifdef CONFIG_ENCODERS
static uint16_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
static uint8_t default_fcode_tab[MAX_MV*2+1];
}
}
}
+#endif //CONFIG_ENCODERS
+
// move into common.c perhaps
#define CHECKED_ALLOCZ(p, size)\
{\
s->dct_unquantize_h263 = dct_unquantize_h263_c;
s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
+#ifdef CONFIG_ENCODERS
s->dct_quantize= dct_quantize_c;
if(s->avctx->dct_algo==FF_DCT_FASTINT)
s->fdct = fdct_ifast;
else
s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
+#endif //CONFIG_ENCODERS
if(s->avctx->idct_algo==FF_IDCT_INT){
s->idct_put= ff_jref_idct_put;
MPV_common_init_ppc(s);
#endif
+#ifdef CONFIG_ENCODERS
s->fast_dct_quantize= s->dct_quantize;
if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
}
+#endif //CONFIG_ENCODERS
+
switch(s->idct_permutation_type){
case FF_NO_IDCT_PERM:
for(i=0; i<64; i++)
s->context_initialized = 0;
}
+#ifdef CONFIG_ENCODERS
+
/* init video encoder */
int MPV_encode_init(AVCodecContext *avctx)
{
return 0;
}
+#endif //CONFIG_ENCODERS
+
void init_rl(RLTable *rl)
{
int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
}
}
+#ifdef CONFIG_ENCODERS
+
static int get_sae(uint8_t *src, int ref, int stride){
int x,y;
int acc=0;
return pbBufPtr(&s->pb) - s->pb.buf;
}
+#endif //CONFIG_ENCODERS
+
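/* get_sae() above is only started in this excerpt; a sketch of the usual
 * sum-of-absolute-error measure its signature suggests (block pixels against
 * a single reference value, e.g. the block mean), assuming a 16x16 block: */
static int sae_16x16_sketch(uint8_t *src, int ref, int stride)
{
    int x, y, acc = 0;

    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            int d = src[x + y * stride] - ref;
            acc += d < 0 ? -d : d;
        }
    }
    return acc;
}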
static inline void gmc1_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
int dest_offset,
}
}
+#ifdef CONFIG_ENCODERS
+
static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
{
static const char tab[64]=
#endif
+#endif //CONFIG_ENCODERS
+
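/* dct_single_coeff_elimination() above is cut off in this hunk; as a
 * conceptual sketch (not the actual implementation): if an inter block ends
 * up with a single small non-zero coefficient, clearing it costs almost no
 * quality but saves the bits needed to code the block at all. */
static int drop_lone_small_coeff_sketch(DCTELEM *block, const uint8_t *scantable)
{
    int i, nonzero = 0, pos = 0;

    for (i = 0; i < 64; i++) {
        int j = scantable[i];
        if (block[j]) {
            nonzero++;
            pos = j;
        }
    }
    if (nonzero == 1 && block[pos] >= -1 && block[pos] <= 1) {
        block[pos] = 0;   /* eliminate the lone +-1 coefficient */
        return 1;
    }
    return 0;
}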
void ff_draw_horiz_band(MpegEncContext *s){
if ( s->avctx->draw_horiz_band
&& (s->last_picture.data[0] || s->low_delay) ) {
}
}
+#ifdef CONFIG_ENCODERS
+
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
{
const int mb_x= s->mb_x;
s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
}
-#ifdef CONFIG_ENCODERS
/* huffman encode */
switch(s->codec_id){ //FIXME funct ptr could be slightly faster
case CODEC_ID_MPEG1VIDEO:
default:
assert(0);
}
-#endif
}
+#endif //CONFIG_ENCODERS
+
/**
 * combines the (truncated) bitstream into a complete frame
* @returns -1 if no complete frame could be created
return 0;
}
+#ifdef CONFIG_ENCODERS
void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
{
int bytes= length>>4;
return last_non_zero;
}
+#endif //CONFIG_ENCODERS
+
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
}
}
+
char ff_get_pict_type_char(int pict_type){
switch(pict_type){
case I_TYPE: return 'I';
AVOPTION_END()
};
+#ifdef CONFIG_ENCODERS
+
AVCodec mpeg1video_encoder = {
"mpeg1video",
CODEC_TYPE_VIDEO,
MPV_encode_picture,
MPV_encode_end,
};
+
+#endif //CONFIG_ENCODERS
+
static uint32_t v2_dc_lum_table[512][2];
static uint32_t v2_dc_chroma_table[512][2];
+#ifdef CONFIG_ENCODERS
static inline void msmpeg4_encode_block(MpegEncContext * s, DCTELEM * block, int n);
+#endif //CONFIG_ENCODERS
static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
int n, int coded, const uint8_t *scantable);
static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
static int msmpeg4_decode_motion(MpegEncContext * s,
int *mx_ptr, int *my_ptr);
+#ifdef CONFIG_ENCODERS
static void msmpeg4v2_encode_motion(MpegEncContext * s, int val);
+#endif //CONFIG_ENCODERS
static void init_h263_dc_for_msmpeg4(void);
static inline void msmpeg4_memsetw(short *tab, int val, int n);
+#ifdef CONFIG_ENCODERS
static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra);
+#endif //CONFIG_ENCODERS
static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
static int wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
}
}
+#ifdef CONFIG_ENCODERS
+
/* build the table which associates an (x,y) motion vector with a VLC */
static void init_mv_table(MVTable *tab)
{
}
}
+#endif //CONFIG_ENCODERS
+
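/* A sketch of the reverse lookup the comment above describes (the table and
 * parameter names here are assumptions, not taken from this excerpt): given
 * the per-code (mvx, mvy) tables, build an index so the encoder can go from
 * an (x, y) pair straight to its VLC code number. */
static void init_mv_index_sketch(int n, const uint8_t *table_mvx,
                                 const uint8_t *table_mvy,
                                 uint16_t *mv_index /* 64*64 entries */)
{
    int i;

    for (i = 0; i < 64 * 64; i++)
        mv_index[i] = n;                    /* n marks "no VLC, use escape" */

    for (i = 0; i < n; i++)
        mv_index[(table_mvx[i] << 6) | table_mvy[i]] = i;
}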
/* predict coded block */
static inline int coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
{
return pred;
}
+#ifdef CONFIG_ENCODERS
+
static void msmpeg4_encode_motion(MpegEncContext * s,
int mx, int my)
{
}
}
+#endif //CONFIG_ENCODERS
+
/* old ffmpeg msmpeg4v3 mode */
static void ff_old_msmpeg4_dc_scale(MpegEncContext * s)
{
#define DC_MAX 119
+#ifdef CONFIG_ENCODERS
+
static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr)
{
int sign, code;
}
}
+#endif //CONFIG_ENCODERS
+
/****************************************/
/* decoding stuff */
tab[i] = val;
}
+#ifdef CONFIG_ENCODERS
+
static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
{
int range, bit_size, sign, code, bits;
}
}
+#endif //CONFIG_ENCODERS
+
/* this is identical to h263 except that its range is multiplied by 2 */
static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
{
return -code;
}
+#ifdef CONFIG_ENCODERS
+
/* write RV 1.0 compatible frame header */
void rv10_encode_picture_header(MpegEncContext *s, int picture_number)
{
}
}
+#endif //CONFIG_ENCODERS
+
/* read RV 1.0 compatible frame header */
static int rv10_decode_picture_header(MpegEncContext *s)
{
static int rv10_decode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
- static int done;
+ static int done=0;
s->avctx= avctx;
s->out_format = FMT_H263;
return 0;
}
+#ifdef CONFIG_ENCODERS
static int wmv2_encode_init(AVCodecContext *avctx){
Wmv2Context * const w= avctx->priv_data;
msmpeg4_encode_block(s, block[i], i);
}
}
+#endif //CONFIG_ENCODERS
static void parse_mb_skip(Wmv2Context * w){
int mb_x, mb_y;
CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
};
+#ifdef CONFIG_ENCODERS
AVCodec wmv2_encoder = {
"wmv2",
CODEC_TYPE_VIDEO,
MPV_encode_picture,
MPV_encode_end,
};
-
+#endif //CONFIG_ENCODERS