{ "IJG-AAN-INT", ff_fdct_ifast, SCALE_PERM },
{ "IJG-LLM-INT", ff_jpeg_fdct_islow_8, NO_PERM },
-#if HAVE_MMX && HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
{ "MMX", ff_fdct_mmx, NO_PERM, AV_CPU_FLAG_MMX },
{ "MMXEXT", ff_fdct_mmx2, NO_PERM, AV_CPU_FLAG_MMXEXT },
{ "SSE2", ff_fdct_sse2, NO_PERM, AV_CPU_FLAG_SSE2 },
{ "INT", ff_j_rev_dct, MMX_PERM },
{ "SIMPLE-C", ff_simple_idct_8, NO_PERM },
-#if HAVE_MMX && HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
{ "SIMPLE-MMX", ff_simple_idct_mmx, MMX_SIMPLE_PERM, AV_CPU_FLAG_MMX },
{ "XVID-MMX", ff_idct_xvid_mmx, NO_PERM, AV_CPU_FLAG_MMX, 1 },
{ "XVID-MMXEXT", ff_idct_xvid_mmx2, NO_PERM, AV_CPU_FLAG_MMXEXT, 1 },
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
-#if HAVE_MMX && HAVE_YASM
+#if HAVE_MMX_EXTERNAL
#include "x86/dsputil_mmx.h"
#endif
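/* The configure-generated convenience macros used throughout this patch
 * fold the old two-term conditions into one. Conceptually (a sketch;
 * the real config.h carries precomputed 0/1 values):
 *
 *     HAVE_MMX_INLINE   == HAVE_MMX && HAVE_INLINE_ASM
 *     HAVE_MMX_EXTERNAL == HAVE_MMX && HAVE_YASM
 *
 * and likewise for MMXEXT, SSE2, SSSE3, SSE4, AVX, AMD3DNOW, FMA4. */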
#define FF_PIXEL_PACKED 1 /**< only one component containing all the channels */
#define FF_PIXEL_PALETTE 2 /**< one component containing indexes for a palette */
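/* Illustrative pairing, assuming the usual pixel format table entries:
 * PIX_FMT_RGB24 is FF_PIXEL_PACKED, PIX_FMT_PAL8 is FF_PIXEL_PALETTE. */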
-#if HAVE_MMX && HAVE_YASM
+#if HAVE_MMX_EXTERNAL
#define deinterlace_line_inplace ff_deinterlace_line_inplace_mmx
#define deinterlace_line ff_deinterlace_line_mmx
#else
return 0;
}
-#if !(HAVE_MMX && HAVE_YASM)
+#if !HAVE_MMX_EXTERNAL
/* filter parameters: [-1 4 2 4 -1] // 8 */
static void deinterlace_line_c(uint8_t *dst,
const uint8_t *lum_m4, const uint8_t *lum_m3,
lum++;
}
}
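/* A minimal sketch of the per-pixel arithmetic, assuming the usual
 * libavcodec rounding bias and av_clip_uint8() clipping:
 *
 *     int sum = -lum_m4[0] + 4 * lum_m3[0] + 2 * lum_m2[0]
 *               + 4 * lum_m1[0] - lum[0];
 *     dst[0]  = av_clip_uint8((sum + 4) >> 3);
 *
 * i.e. the [-1 4 2 4 -1] / 8 filter named in the comment above. */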
-#endif
+#endif /* !HAVE_MMX_EXTERNAL */
/* deinterlacing : 2 temporal taps, 3 spatial taps linear filter. The
top field is copied as is, but the bottom field is deinterlaced
%define LOOP_ALIGN ALIGN 16
AC3_EXPONENT_MIN mmxext
%endif
-%if HAVE_SSE
+%if HAVE_SSE2_EXTERNAL
INIT_XMM
AC3_EXPONENT_MIN sse2
%endif
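; HAVE_SSE2_EXTERNAL and the other *_EXTERNAL variants below are the
; yasm-side counterparts of the C-side *_INLINE macros; conceptually
; HAVE_SSE2_EXTERNAL == HAVE_SSE2 && HAVE_YASM. The old bare HAVE_SSE
; check also misnamed the instruction set actually used (SSE2).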
pabsd %1, %1
%endmacro
-%if HAVE_AMD3DNOW
+%if HAVE_AMD3DNOW_EXTERNAL
INIT_MMX
cglobal ac3_extract_exponents_3dnow, 3,3,0, exp, coef, len
add expq, lenq
REP_RET
%endmacro
-%if HAVE_SSE
+%if HAVE_SSE2_EXTERNAL
INIT_XMM
%define PABSD PABSD_MMX
AC3_EXTRACT_EXPONENTS sse2
-%if HAVE_SSSE3
+%if HAVE_SSSE3_EXTERNAL
%define PABSD PABSD_SSSE3
AC3_EXTRACT_EXPONENTS ssse3
%endif
INIT_YMM avx
SECTION_TEXT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
; void ff_dct32_float_avx(FFTSample *out, const FFTSample *in)
cglobal dct32_float, 2,3,8, out, in, tmp
; pass 1
INIT_XMM sse
VECTOR_FMUL_REVERSE
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
INIT_XMM sse
VECTOR_FMUL_ADD
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
INIT_XMM sse
BUTTERFLIES_FLOAT_INTERLEAVE
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
BUTTERFLIES_FLOAT_INTERLEAVE
%endif
static void dsputil_init_3dnowext(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
-#if HAVE_6REGS && HAVE_INLINE_ASM
+#if HAVE_AMD3DNOWEXT_INLINE && HAVE_6REGS
c->vector_fmul_window = vector_fmul_window_3dnowext;
#endif
}
static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
-#if HAVE_SSSE3
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
const int bit_depth = avctx->bits_per_raw_sample;
-#if HAVE_INLINE_ASM
+#if HAVE_SSSE3_INLINE
if (!high_bit_depth && CONFIG_H264QPEL) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(3, 2, ssse3);
H264_QPEL_FUNCS(3, 3, ssse3);
}
-#endif /* HAVE_INLINE_ASM */
-#if HAVE_YASM
+#endif /* HAVE_SSSE3_INLINE */
+
+#if HAVE_SSSE3_EXTERNAL
if (bit_depth == 10 && CONFIG_H264QPEL) {
H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
c->bswap_buf = ff_bswap32_buf_ssse3;
-#endif
-#endif
+#endif /* HAVE_SSSE3_EXTERNAL */
}
static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
-#if HAVE_YASM
+#if HAVE_SSE4_EXTERNAL
c->vector_clip_int32 = ff_vector_clip_int32_sse4;
-#endif
+#endif /* HAVE_SSE4_EXTERNAL */
}
static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
{
-#if HAVE_AVX && HAVE_YASM
+#if HAVE_AVX_EXTERNAL
const int bit_depth = avctx->bits_per_raw_sample;
if (bit_depth == 10) {
c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
c->vector_fmul_reverse = ff_vector_fmul_reverse_avx;
c->vector_fmul_add = ff_vector_fmul_add_avx;
-#endif
+#endif /* HAVE_AVX_EXTERNAL */
}
void ff_dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx)
DCT_SAD_FUNC(sse2)
#undef MMABS
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
#define MMABS(a,z) MMABS_SSSE3(a,z)
DCT_SAD_FUNC(ssse3)
#undef MMABS
#undef SCALE_OFFSET
#undef PMULHRW
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
#undef PHADDD
#define DEF(x) x ## _ssse3
#define SET_RND(x)
#undef SCALE_OFFSET
#undef PMULHRW
#undef PHADDD
-#endif //HAVE_SSSE3
+#endif /* HAVE_SSSE3_INLINE */
#endif /* HAVE_INLINE_ASM */
c->sum_abs_dctelem= sum_abs_dctelem_sse2;
}
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
if(mm_flags & AV_CPU_FLAG_SSSE3){
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->try_8x8basis= try_8x8basis_ssse3;
INIT_YMM avx
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
align 16
fft8_avx:
mova m0, Z(0)
INIT_YMM avx
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
%macro INTERL_AVX 5
vunpckhps %3, %2, %1
vunpcklps %2, %2, %1
dispatch_tab %+ fullsuffix: pointer list_of_fft
%endmacro ; DECL_FFT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
DECL_FFT 6
DECL_FFT 6, _interleave
INIT_YMM avx
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
DECL_IMDCT POSROTATESHUF_AVX
%endif
%define CHROMAMC_AVG NOTHING
INIT_XMM sse2
CHROMA_MC8 put
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 put
%endif
%define PAVG pavgw
INIT_XMM sse2
CHROMA_MC8 avg
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 avg
%endif
INIT_XMM sse2
IDCT_ADD_10
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD_10
%endif
INIT_XMM sse2
ALIGN 16
ADD4x4IDCT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
ALIGN 16
ADD4x4IDCT
INIT_XMM sse2
IDCT_ADD16_10
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16_10
%endif
INIT_XMM sse2
IDCT8_DC_ADD
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_DC_ADD
%endif
INIT_XMM sse2
IDCT_ADD16INTRA_10
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16INTRA_10
%endif
INIT_XMM sse2
IDCT_ADD8
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD8
%endif
INIT_XMM sse2
IDCT8_ADD
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD
%endif
INIT_XMM sse2
IDCT8_ADD4
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD4
%endif
PRED4x4_DR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_DR ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED4x4_DR avx
%endif
PRED4x4_VR sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_VR ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED4x4_VR avx
%endif
PRED4x4_HD sse2
%define PALIGNR PALIGNR_SSSE3
PRED4x4_HD ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED4x4_HD avx
%endif
INIT_XMM
PRED4x4_DL sse2
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED4x4_DL avx
%endif
INIT_XMM
PRED4x4_VL sse2
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED4x4_VL avx
%endif
INIT_XMM
PRED8x8L_TOP_DC sse2
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_TOP_DC avx
%endif
INIT_XMM
PRED8x8L_DC sse2
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_DC avx
%endif
INIT_XMM
PRED8x8L_VERTICAL sse2
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_VERTICAL avx
%endif
PRED8x8L_HORIZONTAL sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_HORIZONTAL avx
%endif
PRED8x8L_DOWN_LEFT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_LEFT ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_DOWN_LEFT avx
%endif
PRED8x8L_DOWN_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_DOWN_RIGHT ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_DOWN_RIGHT avx
%endif
PRED8x8L_VERTICAL_RIGHT sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_VERTICAL_RIGHT ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_VERTICAL_RIGHT avx
%endif
PRED8x8L_HORIZONTAL_UP sse2
%define PALIGNR PALIGNR_SSSE3
PRED8x8L_HORIZONTAL_UP ssse3
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_AVX
PRED8x8L_HORIZONTAL_UP avx
%endif
QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
H264_MC_4816(mmx2)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
#endif
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
-#if HAVE_AVX
+#if HAVE_AVX_EXTERNAL
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
-#if HAVE_AVX
+#if HAVE_AVX_EXTERNAL
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#endif
IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
-#if HAVE_AVX
+#if HAVE_AVX_EXTERNAL
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
#endif
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
}
-#if HAVE_AVX
+#if HAVE_AVX_EXTERNAL
if (mm_flags & AV_CPU_FLAG_AVX) {
c->h264_idct_dc_add =
c->h264_idct_add = ff_h264_idct_add_10_avx;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif /* HAVE_ALIGNED_STACK */
}
-#endif /* HAVE_AVX */
+#endif /* HAVE_AVX_EXTERNAL */
}
}
}
#include "libavcodec/mpegvideo.h"
#include "dsputil_mmx.h"
-#if HAVE_INLINE_ASM
-
extern uint16_t ff_inv_zigzag_direct16[64];
-#if HAVE_MMX
+#if HAVE_MMX_INLINE
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 0
#define COMPILE_TEMPLATE_SSSE3 0
#define RENAME(a) a ## _MMX
#define RENAMEl(a) a ## _mmx
#include "mpegvideoenc_template.c"
-#endif /* HAVE_MMX */
+#endif /* HAVE_MMX_INLINE */
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
#undef COMPILE_TEMPLATE_SSSE3
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_MMXEXT
#define RENAME(a) a ## _MMX2
#define RENAMEl(a) a ## _mmx2
#include "mpegvideoenc_template.c"
-#endif /* HAVE_MMXEXT */
+#endif /* HAVE_MMXEXT_INLINE */
-#if HAVE_SSE2
+#if HAVE_SSE2_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define RENAME(a) a ## _SSE2
#define RENAMEl(a) a ## _sse2
#include "mpegvideoenc_template.c"
-#endif /* HAVE_SSE2 */
+#endif /* HAVE_SSE2_INLINE */
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define RENAME(a) a ## _SSSE3
#define RENAMEl(a) a ## _sse2
#include "mpegvideoenc_template.c"
-#endif /* HAVE_SSSE3 */
-
-#endif /* HAVE_INLINE_ASM */
+#endif /* HAVE_SSSE3_INLINE */
void ff_MPV_encode_init_x86(MpegEncContext *s)
{
-#if HAVE_INLINE_ASM
int mm_flags = av_get_cpu_flags();
const int dct_algo = s->avctx->dct_algo;
if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) {
-#if HAVE_MMX
+#if HAVE_MMX_INLINE
-if (mm_flags & AV_CPU_FLAG_MMX && HAVE_MMX)
+if (mm_flags & AV_CPU_FLAG_MMX)
s->dct_quantize = dct_quantize_MMX;
#endif
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
-if (mm_flags & AV_CPU_FLAG_MMXEXT && HAVE_MMXEXT)
+if (mm_flags & AV_CPU_FLAG_MMXEXT)
s->dct_quantize = dct_quantize_MMX2;
#endif
-#if HAVE_SSE2
+#if HAVE_SSE2_INLINE
-if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE2)
+if (mm_flags & AV_CPU_FLAG_SSE2)
s->dct_quantize = dct_quantize_SSE2;
#endif
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
if (mm_flags & AV_CPU_FLAG_SSSE3)
s->dct_quantize = dct_quantize_SSSE3;
#endif
}
-#endif /* HAVE_INLINE_ASM */
}
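/* The dct_quantize selection is double-gated: #if HAVE_*_INLINE decides
 * which candidates are compiled in, the av_get_cpu_flags() bits choose
 * among them at runtime, and later assignments override earlier ones,
 * so the fastest supported variant wins. */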
DECLARE_ALIGNED(16, static const uint16_t, pw_7f)[8] = {0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F,0x7F};
DECLARE_ALIGNED(16, static const uint16_t, pw_ff)[8] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
static void gradfun_filter_line_mmx2(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
intptr_t x;
}
#endif
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
static void gradfun_filter_line_ssse3(uint8_t *dst, uint8_t *src, uint16_t *dc, int width, int thresh, const uint16_t *dithers)
{
intptr_t x;
:"memory"
);
}
-#endif // HAVE_SSSE3
+#endif /* HAVE_SSSE3_INLINE */
-#if HAVE_SSE
+#if HAVE_SSE2_INLINE
static void gradfun_blur_line_sse2(uint16_t *dc, uint16_t *buf, uint16_t *buf1, uint8_t *src, int src_linesize, int width)
{
#define BLURV(load)\
BLURV("movdqa");
}
}
-#endif // HAVE_SSE
+#endif /* HAVE_SSE2_INLINE */
#endif /* HAVE_INLINE_ASM */
{
int cpu_flags = av_get_cpu_flags();
-#if HAVE_INLINE_ASM
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
if (cpu_flags & AV_CPU_FLAG_MMXEXT)
gf->filter_line = gradfun_filter_line_mmx2;
#endif
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
if (cpu_flags & AV_CPU_FLAG_SSSE3)
gf->filter_line = gradfun_filter_line_ssse3;
#endif
-#if HAVE_SSE
+#if HAVE_SSE2_INLINE
if (cpu_flags & AV_CPU_FLAG_SSE2)
gf->blur_line = gradfun_blur_line_sse2;
#endif
-#endif /* HAVE_INLINE_ASM */
}
DECLARE_ASM_CONST(16, const xmm_reg, pb_1) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ASM_CONST(16, const xmm_reg, pw_1) = {0x0001000100010001ULL, 0x0001000100010001ULL};
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
#define COMPILE_TEMPLATE_SSE2 1
#define COMPILE_TEMPLATE_SSSE3 1
#undef RENAME
#undef COMPILE_TEMPLATE_SSSE3
#endif
-#if HAVE_SSE
+#if HAVE_SSE2_INLINE
#undef RENAME
#define RENAME(a) a ## _sse2
#include "yadif_template.c"
#undef COMPILE_TEMPLATE_SSE2
#endif
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
#undef RENAME
#define RENAME(a) a ## _mmx2
#include "yadif_template.c"
{
int cpu_flags = av_get_cpu_flags();
-#if HAVE_INLINE_ASM
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
if (cpu_flags & AV_CPU_FLAG_MMXEXT)
yadif->filter_line = yadif_filter_line_mmx2;
#endif
-#if HAVE_SSE
+#if HAVE_SSE2_INLINE
if (cpu_flags & AV_CPU_FLAG_SSE2)
yadif->filter_line = yadif_filter_line_sse2;
#endif
-#if HAVE_SSSE3
+#if HAVE_SSSE3_INLINE
if (cpu_flags & AV_CPU_FLAG_SSSE3)
yadif->filter_line = yadif_filter_line_ssse3;
#endif
-#endif /* HAVE_INLINE_ASM */
}
INIT_XMM sse2
CONV_S32_TO_FLT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
CONV_S32_TO_FLT
%endif
INIT_XMM sse2
CONV_FLT_TO_S32
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
CONV_FLT_TO_S32
%endif
INIT_XMM sse2
CONV_S16P_TO_S16_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_S16_2CH
%endif
CONV_S16P_TO_S16_6CH
INIT_XMM sse2slow
CONV_S16P_TO_S16_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_S16_6CH
%endif
INIT_XMM sse2
CONV_S16P_TO_FLT_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_FLT_2CH
%endif
CONV_S16P_TO_FLT_6CH
INIT_XMM ssse3
CONV_S16P_TO_FLT_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16P_TO_FLT_6CH
%endif
CONV_FLTP_TO_S16_6CH
INIT_XMM sse2
CONV_FLTP_TO_S16_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_S16_6CH
%endif
INIT_XMM sse
CONV_FLTP_TO_FLT_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_FLT_2CH
%endif
CONV_FLTP_TO_FLT_6CH
INIT_XMM sse4
CONV_FLTP_TO_FLT_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLTP_TO_FLT_6CH
%endif
CONV_S16_TO_S16P_2CH
INIT_XMM ssse3
CONV_S16_TO_S16P_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_S16P_2CH
%endif
%define PALIGNR PALIGNR_SSSE3
INIT_XMM ssse3
CONV_S16_TO_S16P_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_S16P_6CH
%endif
INIT_XMM sse2
CONV_S16_TO_FLTP_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_FLTP_2CH
%endif
CONV_S16_TO_FLTP_6CH
INIT_XMM sse4
CONV_S16_TO_FLTP_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_S16_TO_FLTP_6CH
%endif
INIT_XMM sse2
CONV_FLT_TO_S16P_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_S16P_2CH
%endif
%define PALIGNR PALIGNR_SSSE3
INIT_XMM ssse3
CONV_FLT_TO_S16P_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_S16P_6CH
%endif
INIT_XMM sse
CONV_FLT_TO_FLTP_2CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_FLTP_2CH
%endif
INIT_XMM sse2
CONV_FLT_TO_FLTP_6CH
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CONV_FLT_TO_FLTP_6CH
%endif
INIT_XMM sse
MIX_2_TO_1_FLTP_FLT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_2_TO_1_FLTP_FLT
%endif
INIT_XMM sse
MIX_1_TO_2_FLTP_FLT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
MIX_1_TO_2_FLTP_FLT
%endif
MIX_1_TO_2_S16P_FLT
INIT_XMM sse4
MIX_1_TO_2_S16P_FLT
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_XMM avx
MIX_1_TO_2_S16P_FLT
%endif
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
- %if HAVE_AVX
+ %if HAVE_AVX_EXTERNAL
%if ARCH_X86_64 || %%i < 6
INIT_YMM avx
%else
MIX_3_8_TO_1_2_FLT %%i, 1, s16p
MIX_3_8_TO_1_2_FLT %%i, 2, s16p
%endif
- %if HAVE_FMA4
+ %if HAVE_FMA4_EXTERNAL
%if ARCH_X86_64 || %%i < 6
INIT_YMM fma4
%else
# define ONLY_IF_THREADS_ENABLED(x) NULL
#endif
-#if HAVE_MMX && HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
/**
* Empty the MMX state.
* This must be called between any DSP function and float/double code.
#elif HAVE_MMX && HAVE_MM_EMPTY
# include <mmintrin.h>
# define emms_c _mm_empty
-#else /* HAVE_MMX */
+#else
# define emms_c()
-#endif /* HAVE_MMX */
+#endif /* HAVE_MMX_INLINE */
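/* The resulting emms_c() selection chain, in order:
 *   HAVE_MMX_INLINE            -> inline-asm "emms"
 *   HAVE_MMX && HAVE_MM_EMPTY  -> _mm_empty() from <mmintrin.h>
 *   otherwise                  -> a no-op
 */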
#endif /* AVUTIL_INTERNAL_H */
INIT_XMM sse
VECTOR_FMUL
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif
INIT_XMM sse
VECTOR_FMAC_SCALAR
-%if HAVE_AVX
+%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
if (!enough_lines)
break; // we can't output a dstY line so let's try with the next slice
-#if HAVE_MMX && HAVE_INLINE_ASM
+#if HAVE_MMX_INLINE
updateMMXDitherTables(c, dstY, lumBufIndex, chrBufIndex,
lastInLumBuf, lastInChrBuf);
#endif
return ret;
}
-#if HAVE_MMXEXT && HAVE_INLINE_ASM
+#if HAVE_MMXEXT_INLINE
static int initMMX2HScaler(int dstW, int xInc, uint8_t *filterCode,
int16_t *filter, int32_t *filterPos, int numSplits)
{
return fragmentPos + 1;
}
-#endif /* HAVE_MMXEXT && HAVE_INLINE_ASM */
+#endif /* HAVE_MMXEXT_INLINE */
static void getSubSampleFactors(int *h, int *v, enum PixelFormat format)
{
/* precalculate horizontal scaler filter coefficients */
{
-#if HAVE_MMXEXT && HAVE_INLINE_ASM
+#if HAVE_MMXEXT_INLINE
// can't downscale!
if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) {
c->lumMmx2FilterCodeSize = initMMX2HScaler(dstW, c->lumXInc, NULL,
mprotect(c->chrMmx2FilterCode, c->chrMmx2FilterCodeSize, PROT_EXEC | PROT_READ);
#endif
} else
-#endif /* HAVE_MMXEXT && HAVE_INLINE_ASM */
+#endif /* HAVE_MMXEXT_INLINE */
{
const int filterAlign =
(HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) ? 4 :
av_freep(&c->hLumFilterPos);
av_freep(&c->hChrFilterPos);
-#if HAVE_MMX
+#if HAVE_MMX_INLINE
#ifdef MAP_ANONYMOUS
if (c->lumMmx2FilterCode)
munmap(c->lumMmx2FilterCode, c->lumMmx2FilterCodeSize);
#endif
c->lumMmx2FilterCode = NULL;
c->chrMmx2FilterCode = NULL;
-#endif /* HAVE_MMX */
+#endif /* HAVE_MMX_INLINE */
av_freep(&c->yuvTable);
av_free(c->formatConvBuffer);
DECLARE_ALIGNED(8, const uint64_t, ff_w1111) = 0x0001000100010001ULL;
//MMX versions
-#if HAVE_MMX
+#if HAVE_MMX_INLINE
#undef RENAME
#define COMPILE_TEMPLATE_MMXEXT 0
#define RENAME(a) a ## _MMX
#endif
//MMX2 versions
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
#undef RENAME
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#if HAVE_INLINE_ASM
if (cpu_flags & AV_CPU_FLAG_MMX)
sws_init_swScale_MMX(c);
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
if (cpu_flags & AV_CPU_FLAG_MMXEXT)
sws_init_swScale_MMX2(c);
#endif
DECLARE_ASM_CONST(8, uint64_t, pb_07) = 0x0707070707070707ULL;
//MMX versions
-#if HAVE_MMX
+#if HAVE_MMX_INLINE
#undef RENAME
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 0
#define RENAME(a) a ## _MMX
#include "yuv2rgb_template.c"
-#endif /* HAVE_MMX */
+#endif /* HAVE_MMX_INLINE */
//MMX2 versions
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
#undef RENAME
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define RENAME(a) a ## _MMX2
#include "yuv2rgb_template.c"
-#endif /* HAVE_MMXEXT */
+#endif /* HAVE_MMXEXT_INLINE */
#endif /* HAVE_INLINE_ASM */
c->srcFormat != PIX_FMT_YUVA420P)
return NULL;
-#if HAVE_MMXEXT
+#if HAVE_MMXEXT_INLINE
if (cpu_flags & AV_CPU_FLAG_MMXEXT) {
switch (c->dstFormat) {
case PIX_FMT_RGB24: return yuv420_rgb24_MMX2;