max = FFMAX(max, FFABS(vector[i]));
bits = normalize_bits(max, 15);
- scale = (bits == 15) ? 0x7FFF : (1 << bits);
+ scale = shift_table[bits];
-- for (i = 0; i < length; i++)
- vector[i] = av_clipl_int32(vector[i] * scale << 1) >> 4;
++ for (i = 0; i < length; i++) {
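++ /* verify the scaled value fits in 32 bits: the clip must be a no-op here */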
++ av_assert2(av_clipl_int32(vector[i] * (int64_t)scale << 1) == vector[i] * (int64_t)scale << 1);
+ vector[i] = (vector[i] * scale) >> 3;
++ }
return bits - 3;
}
int i;
pitch_lag = FFMIN(PITCH_MAX - 3, pitch_lag);
- limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
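+ /* for a forward search, keep the correlation window inside the excitation buffer */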
+ if (dir > 0)
+ limit = FFMIN(FRAME_LEN + PITCH_MAX - offset - length, pitch_lag + 3);
+ else
+ limit = pitch_lag + 3;
for (i = pitch_lag - 3; i <= limit; i++) {
- ccr = dot_product(buf, buf + dir * i, length, 1);
+ ccr = ff_dot_product(buf, buf + dir * i, length) << 1;
if (ccr > *ccr_max) {
*ccr_max = ccr;
int16_t lpc[SUBFRAMES * LPC_ORDER];
int16_t acb_vector[SUBFRAME_LEN];
int16_t *vector_ptr;
+ int16_t *out;
int bad_frame = 0, i, j, ret;
- if (buf_size < frame_size[dec_mode]) {
- if (buf_size)
- av_log(avctx, AV_LOG_WARNING,
- "Expected %d bytes, got %d - skipping packet\n",
- frame_size[dec_mode], buf_size);
+ if (!buf_size || buf_size < frame_size[dec_mode]) {
*got_frame_ptr = 0;
return buf_size;
}
p->cur_frame_type = UNTRANSMITTED_FRAME;
}
- p->frame.nb_samples = FRAME_LEN;
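+ /* ask for LPC_ORDER extra samples: the frame buffer doubles as scratch for the synthesis history and is trimmed back to FRAME_LEN below */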
+ p->frame.nb_samples = FRAME_LEN + LPC_ORDER;
if ((ret = avctx->get_buffer(avctx, &p->frame)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
- return ret;
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return ret;
}
- out= (int16_t*)p->frame.data[0];
+ out = (int16_t *)p->frame.data[0];
if (p->cur_frame_type == ACTIVE_FRAME) {
if (!bad_frame)
memcpy(p->prev_excitation, p->excitation + FRAME_LEN,
PITCH_MAX * sizeof(*p->excitation));
} else {
- memset(out, 0, sizeof(int16_t)*FRAME_LEN);
+ memset(out, 0, FRAME_LEN * 2);
av_log(avctx, AV_LOG_WARNING,
"G.723.1: Comfort noise generation not supported yet\n");
-
- *got_frame_ptr = 1;
- *(AVFrame *)data = p->frame;
return frame_size[dec_mode];
}
p->past_frame_type = p->cur_frame_type;
- memcpy(p->audio, p->synth_mem, LPC_ORDER * sizeof(*p->audio));
+ memcpy(out, p->synth_mem, LPC_ORDER * sizeof(int16_t));
for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
- ff_celp_lp_synthesis_filter(p->audio + i, &lpc[j * LPC_ORDER],
- p->audio + i, SUBFRAME_LEN, LPC_ORDER,
+ ff_celp_lp_synthesis_filter(out + i, &lpc[j * LPC_ORDER],
+ out + i, SUBFRAME_LEN, LPC_ORDER,
0, 1, 1 << 12);
- memcpy(p->synth_mem, p->audio + FRAME_LEN, LPC_ORDER * sizeof(*p->audio));
+ memcpy(p->synth_mem, out + FRAME_LEN, LPC_ORDER * sizeof(int16_t));
- if (p->postfilter)
+ if (p->postfilter) {
- formant_postfilter(p, lpc, p->audio);
- memcpy(p->frame.data[0], p->audio + LPC_ORDER, FRAME_LEN * 2);
+ formant_postfilter(p, lpc, out);
+ } else { // if the output is not postfiltered, it should be scaled by 2
+ for (i = 0; i < FRAME_LEN; i++)
- out[i] = av_clip_int16(p->audio[LPC_ORDER + i] << 1);
++ out[LPC_ORDER + i] = av_clip_int16(out[LPC_ORDER + i] << 1);
+ }
- *(AVFrame*)data = p->frame;
- *got_frame_ptr = 1;
+ memmove(out, out + LPC_ORDER, FRAME_LEN * sizeof(int16_t));
+ p->frame.nb_samples = FRAME_LEN;
++
+ *got_frame_ptr = 1;
+ *(AVFrame *)data = p->frame;
return frame_size[dec_mode];
}
int bidir, int edges, int step,
int mask_mv0, int mask_mv1, int field);
- #define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
- void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
- int alpha, int beta, int8_t *tc0);
+ #define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
+ void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
+ int stride, \
+ int alpha, \
+ int beta, \
+ int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
- void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *pix, int stride, \
- int alpha, int beta);
-
- #define LF_FUNCS(type, depth)\
- LF_FUNC (h, chroma, depth, mmx2)\
- LF_IFUNC(h, chroma_intra, depth, mmx2)\
- LF_FUNC (v, chroma, depth, mmx2)\
- LF_IFUNC(v, chroma_intra, depth, mmx2)\
- LF_FUNC (h, luma, depth, mmx2)\
- LF_IFUNC(h, luma_intra, depth, mmx2)\
- LF_FUNC (h, luma, depth, sse2)\
- LF_IFUNC(h, luma_intra, depth, sse2)\
- LF_FUNC (v, luma, depth, sse2)\
- LF_IFUNC(v, luma_intra, depth, sse2)\
- LF_FUNC (h, chroma, depth, sse2)\
- LF_IFUNC(h, chroma_intra, depth, sse2)\
- LF_FUNC (v, chroma, depth, sse2)\
- LF_IFUNC(v, chroma_intra, depth, sse2)\
- LF_FUNC (h, luma, depth, avx)\
- LF_IFUNC(h, luma_intra, depth, avx)\
- LF_FUNC (v, luma, depth, avx)\
- LF_IFUNC(v, luma_intra, depth, avx)\
- LF_FUNC (h, chroma, depth, avx)\
- LF_IFUNC(h, chroma_intra, depth, avx)\
- LF_FUNC (v, chroma, depth, avx)\
- LF_IFUNC(v, chroma_intra, depth, avx)
-
- LF_FUNCS( uint8_t, 8)
+ void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
+ int stride, \
+ int alpha, \
+ int beta);
+
+ #define LF_FUNCS(type, depth) \
+ LF_FUNC(h, chroma, depth, mmx2) \
+ LF_IFUNC(h, chroma_intra, depth, mmx2) \
+ LF_FUNC(v, chroma, depth, mmx2) \
+ LF_IFUNC(v, chroma_intra, depth, mmx2) \
+ LF_FUNC(h, luma, depth, mmx2) \
+ LF_IFUNC(h, luma_intra, depth, mmx2) \
+ LF_FUNC(h, luma, depth, sse2) \
+ LF_IFUNC(h, luma_intra, depth, sse2) \
+ LF_FUNC(v, luma, depth, sse2) \
+ LF_IFUNC(v, luma_intra, depth, sse2) \
+ LF_FUNC(h, chroma, depth, sse2) \
+ LF_IFUNC(h, chroma_intra, depth, sse2) \
+ LF_FUNC(v, chroma, depth, sse2) \
+ LF_IFUNC(v, chroma_intra, depth, sse2) \
+ LF_FUNC(h, luma, depth, avx) \
+ LF_IFUNC(h, luma_intra, depth, avx) \
+ LF_FUNC(v, luma, depth, avx) \
+ LF_IFUNC(v, luma_intra, depth, avx) \
+ LF_FUNC(h, chroma, depth, avx) \
+ LF_IFUNC(h, chroma_intra, depth, avx) \
+ LF_FUNC(v, chroma, depth, avx) \
+ LF_IFUNC(v, chroma_intra, depth, avx)
+
+ LF_FUNCS(uint8_t, 8)
LF_FUNCS(uint16_t, 10)
-#if ARCH_X86_32
+#if ARCH_X86_32 && HAVE_YASM
- LF_FUNC (v8, luma, 8, mmx2)
- static void ff_deblock_v_luma_8_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
+ LF_FUNC(v8, luma, 8, mmx2)
+ static void ff_deblock_v_luma_8_mmx2(uint8_t *pix, int stride, int alpha,
+ int beta, int8_t *tc0)
{
- if((tc0[0] & tc0[1]) >= 0)
- ff_deblock_v8_luma_8_mmx2(pix+0, stride, alpha, beta, tc0);
- if((tc0[2] & tc0[3]) >= 0)
- ff_deblock_v8_luma_8_mmx2(pix+8, stride, alpha, beta, tc0+2);
+ if ((tc0[0] & tc0[1]) >= 0)
+ ff_deblock_v8_luma_8_mmx2(pix + 0, stride, alpha, beta, tc0);
+ if ((tc0[2] & tc0[3]) >= 0)
+ ff_deblock_v8_luma_8_mmx2(pix + 8, stride, alpha, beta, tc0 + 2);
}
- LF_IFUNC(v8, luma_intra, 8, mmx2)
- static void ff_deblock_v_luma_intra_8_mmx2(uint8_t *pix, int stride, int alpha, int beta)
+
+ LF_IFUNC(v8, luma_intra, 8, mmx2)
+ static void ff_deblock_v_luma_intra_8_mmx2(uint8_t *pix, int stride,
+ int alpha, int beta)
{
- ff_deblock_v8_luma_intra_8_mmx2(pix+0, stride, alpha, beta);
- ff_deblock_v8_luma_intra_8_mmx2(pix+8, stride, alpha, beta);
+ ff_deblock_v8_luma_intra_8_mmx2(pix + 0, stride, alpha, beta);
+ ff_deblock_v8_luma_intra_8_mmx2(pix + 8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 */
#if HAVE_YASM
int mm_flags = av_get_cpu_flags();
- if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMX2) {
+ if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMX2)
c->h264_loop_filter_strength = ff_h264_loop_filter_strength_mmx2;
- }
if (bit_depth == 8) {
- if (mm_flags & AV_CPU_FLAG_MMX) {
- c->h264_idct_dc_add =
- c->h264_idct_add = ff_h264_idct_add_8_mmx;
- c->h264_idct8_dc_add =
- c->h264_idct8_add = ff_h264_idct8_add_8_mmx;
-
- c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
- c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
- if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
- c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
- if (mm_flags & AV_CPU_FLAG_CMOV)
- c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;
-
- if (mm_flags & AV_CPU_FLAG_MMX2) {
- c->h264_idct_dc_add = ff_h264_idct_dc_add_8_mmx2;
- c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
- c->h264_idct_add16 = ff_h264_idct_add16_8_mmx2;
- c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx2;
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ c->h264_idct_dc_add =
+ c->h264_idct_add = ff_h264_idct_add_8_mmx;
+ c->h264_idct8_dc_add =
+ c->h264_idct8_add = ff_h264_idct8_add_8_mmx;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
- c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;
-
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmx2;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_8_mmx2;
- if (chroma_format_idc == 1) {
- c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmx2;
- c->h264_h_loop_filter_chroma_intra= ff_deblock_h_chroma_intra_8_mmx2;
- }
+ c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
+ if (mm_flags & AV_CPU_FLAG_CMOV)
+ c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;
+
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
+ c->h264_idct_dc_add = ff_h264_idct_dc_add_8_mmx2;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
+ c->h264_idct_add16 = ff_h264_idct_add16_8_mmx2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx2;
+
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_8_mmx2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmx2;
+ if (chroma_format_idc == 1) {
+ c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_8_mmx2;
+ c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmx2;
+ }
#if ARCH_X86_32
- c->h264_v_loop_filter_luma= ff_deblock_v_luma_8_mmx2;
- c->h264_h_loop_filter_luma= ff_deblock_h_luma_8_mmx2;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmx2;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmx2;
- #endif
- c->weight_h264_pixels_tab[0]= ff_h264_weight_16_mmx2;
- c->weight_h264_pixels_tab[1]= ff_h264_weight_8_mmx2;
- c->weight_h264_pixels_tab[2]= ff_h264_weight_4_mmx2;
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_mmx2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_mmx2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmx2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmx2;
+ #endif /* ARCH_X86_32 */
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmx2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_mmx2;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_mmx2;
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_mmx2;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_mmx2;
- c->biweight_h264_pixels_tab[2]= ff_h264_biweight_4_mmx2;
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_mmx2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_mmx2;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_mmx2;
- if (mm_flags&AV_CPU_FLAG_SSE2) {
- c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
+ c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
- c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
- c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
- if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
- c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
- c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;
+ c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
+ c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;
- c->weight_h264_pixels_tab[0]= ff_h264_weight_16_sse2;
- c->weight_h264_pixels_tab[1]= ff_h264_weight_8_sse2;
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_sse2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_sse2;
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_sse2;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_sse2;
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_sse2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_sse2;
#if HAVE_ALIGNED_STACK
- c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
- c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
- #endif
- }
- if (mm_flags&AV_CPU_FLAG_SSSE3) {
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16_ssse3;
- c->biweight_h264_pixels_tab[1]= ff_h264_biweight_8_ssse3;
- }
- if (HAVE_AVX && mm_flags&AV_CPU_FLAG_AVX) {
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ if (mm_flags & AV_CPU_FLAG_SSSE3) {
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_ssse3;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_ssse3;
+ }
- if (mm_flags & AV_CPU_FLAG_AVX) {
++ if (HAVE_AVX && mm_flags & AV_CPU_FLAG_AVX) {
#if HAVE_ALIGNED_STACK
- c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
- c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
- #endif
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
}
}
- }
} else if (bit_depth == 10) {
- if (mm_flags & AV_CPU_FLAG_MMX) {
- if (mm_flags & AV_CPU_FLAG_MMX2) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMX2) {
#if ARCH_X86_32
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_mmx2;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_mmx2;
- c->h264_v_loop_filter_luma= ff_deblock_v_luma_10_mmx2;
- c->h264_h_loop_filter_luma= ff_deblock_h_luma_10_mmx2;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmx2;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmx2;
- #endif
- c->h264_idct_dc_add= ff_h264_idct_dc_add_10_mmx2;
- if (mm_flags&AV_CPU_FLAG_SSE2) {
- c->h264_idct_add = ff_h264_idct_add_10_sse2;
- c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
-
- c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
- if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
- c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_mmx2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_mmx2;
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_mmx2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_mmx2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmx2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmx2;
+ #endif /* ARCH_X86_32 */
+ c->h264_idct_dc_add = ff_h264_idct_dc_add_10_mmx2;
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
+ c->h264_idct_add = ff_h264_idct_add_10_sse2;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
- c->h264_idct8_add = ff_h264_idct8_add_10_sse2;
- c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
- #endif
+ c->h264_idct8_add = ff_h264_idct8_add_10_sse2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
- c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
- c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
- c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;
- c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
- c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
- c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_sse2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
- c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
- c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
- #endif
- }
- if (mm_flags&AV_CPU_FLAG_SSE4) {
- c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
- c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
- c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;
-
- c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
- c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
- c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
- }
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ if (mm_flags & AV_CPU_FLAG_SSE4) {
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
+ }
#if HAVE_AVX
- if (mm_flags&AV_CPU_FLAG_AVX) {
- c->h264_idct_dc_add =
- c->h264_idct_add = ff_h264_idct_add_10_avx;
- c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
-
- c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
- if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
- c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
+ if (mm_flags & AV_CPU_FLAG_AVX) {
+ c->h264_idct_dc_add =
+ c->h264_idct_add = ff_h264_idct_add_10_avx;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
- c->h264_idct8_add = ff_h264_idct8_add_10_avx;
- c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
- #endif
+ c->h264_idct8_add = ff_h264_idct8_add_10_avx;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
+ #endif /* HAVE_ALIGNED_STACK */
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_avx;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
- c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
- c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
- #endif
- }
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
#endif /* HAVE_AVX */
+ }
}
}
- }
- #endif
+ #endif /* HAVE_YASM */
}