1 ;******************************************************************************
2 ;* x86 optimized channel mixing
3 ;* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
5 ;* This file is part of Libav.
7 ;* Libav is free software; you can redistribute it and/or
8 ;* modify it under the terms of the GNU Lesser General Public
9 ;* License as published by the Free Software Foundation; either
10 ;* version 2.1 of the License, or (at your option) any later version.
12 ;* Libav is distributed in the hope that it will be useful,
13 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;* Lesser General Public License for more details.
17 ;* You should have received a copy of the GNU Lesser General Public
18 ;* License along with Libav; if not, write to the Free Software
19 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 ;******************************************************************************
23 %include "x86util.asm"
28 ;-----------------------------------------------------------------------------
29 ; void ff_mix_2_to_1_fltp_flt(float **src, float **matrix, int len,
30 ; int out_ch, int in_ch);
31 ;-----------------------------------------------------------------------------
; In-place stereo->mono downmix for planar float input:
;   ch0[i] = matrix[0][0]*ch0[i] + matrix[0][1]*ch1[i]
; NOTE(review): this excerpt is fragmentary (loop label, pointer advance,
; branch and %endmacro are not visible) — comments cover only the shown code.
33 %macro MIX_2_TO_1_FLTP_FLT 0
34 cglobal mix_2_to_1_fltp_flt, 3,4,6, src, matrix, len, src1
; src1q = src[1]; presumably later rebased as an offset from src[0] so a
; single pointer increment walks both channels — TODO confirm (loop not shown)
35 mov src1q, [srcq+gprsize]
; matrixq = matrix[0] (first output row of the mixing matrix)
38 mov matrixq, [matrixq ]
; splat the two row coefficients across all vector lanes:
; m4 = matrix[0][0], m5 = matrix[0][1]
39 VBROADCASTSS m4, [matrixq ]
40 VBROADCASTSS m5, [matrixq+4]
; two vectors per iteration: coeff1*ch1 (m1,m3) and coeff0*ch0 (m2);
; the addps combining the products is in the elided lines
44 mulps m1, m5, [srcq+src1q ]
45 mulps m2, m4, [srcq+ mmsize]
46 mulps m3, m5, [srcq+src1q+mmsize]
; store second mixed vector back into channel 0 (in-place output)
50 mova [srcq+mmsize], m2
64 ;-----------------------------------------------------------------------------
65 ; void ff_mix_2_to_1_s16p_flt(int16_t **src, float **matrix, int len,
66 ; int out_ch, int in_ch);
67 ;-----------------------------------------------------------------------------
; Stereo->mono downmix for planar s16 input with float matrix coefficients.
; NOTE(review): fragmentary excerpt — the s16->float conversion, FMA/mul mix,
; float->s16 repack, loop control and %endmacro are in elided lines.
69 %macro MIX_2_TO_1_S16P_FLT 0
70 cglobal mix_2_to_1_s16p_flt, 3,4,6, src, matrix, len, src1
; src1q = src[1] (second channel plane pointer)
71 mov src1q, [srcq+gprsize]
; matrixq = matrix[0] (row of float coefficients for the single output channel)
74 mov matrixq, [matrixq ]
; m4 = splat(matrix[0][0]), m5 = splat(matrix[0][1])
75 VBROADCASTSS m4, [matrixq ]
76 VBROADCASTSS m5, [matrixq+4]
108 ;-----------------------------------------------------------------------------
109 ; void ff_mix_2_to_1_s16p_q8(int16_t **src, int16_t **matrix, int len,
110 ; int out_ch, int in_ch);
111 ;-----------------------------------------------------------------------------
; Stereo->mono downmix for planar s16 input using fixed-point (Q8) matrix
; coefficients — all-integer path, no float conversion needed.
; NOTE(review): fragmentary excerpt — coefficient splat, pmaddwd/shift mix,
; store and loop control are in elided lines.
114 cglobal mix_2_to_1_s16p_q8, 3,4,6, src, matrix, len, src1
; src1q = src[1] (second channel plane pointer)
115 mov src1q, [srcq+gprsize]
; matrixq = matrix[0] (row of Q8 integer coefficients)
118 mov matrixq, [matrixq]
; load a vector of channel-1 samples (src1q presumably holds the offset of
; ch1 relative to ch0 here — TODO confirm against elided setup)
129 mova m2, [srcq+src1q]
149 ;-----------------------------------------------------------------------------
150 ; void ff_mix_1_to_2_fltp_flt(float **src, float **matrix, int len,
151 ; int out_ch, int in_ch);
152 ;-----------------------------------------------------------------------------
; Mono->stereo upmix for planar float input:
;   ch1[i] = matrix[1][0]*ch0[i];  ch0[i] = matrix[0][0]*ch0[i]  (in place)
; NOTE(review): fragmentary excerpt — the mulps producing m0/m1 and the loop
; control are in elided lines; comments cover only the shown code.
154 %macro MIX_1_TO_2_FLTP_FLT 0
155 cglobal mix_1_to_2_fltp_flt, 3,5,4, src0, matrix0, len, src1, matrix1
; src1q = src[1], the output plane for the second channel
156 mov src1q, [src0q+gprsize]
; matrix1q = matrix[1], matrix0q = matrix[0]: one coefficient row per output
; channel (matrix0q is overwritten last since it doubles as the arg pointer)
159 mov matrix1q, [matrix0q+gprsize]
160 mov matrix0q, [matrix0q]
; m2 = splat(matrix[0][0]), m3 = splat(matrix[1][0])
161 VBROADCASTSS m2, [matrix0q]
162 VBROADCASTSS m3, [matrix1q]
; store channel-1 result; src1q is presumably ch1's offset relative to ch0
; at this point — TODO confirm against the elided pointer setup
169 mova [src0q+src1q], m1
183 ;-----------------------------------------------------------------------------
184 ; void ff_mix_1_to_2_s16p_flt(int16_t **src, float **matrix, int len,
185 ; int out_ch, int in_ch);
186 ;-----------------------------------------------------------------------------
; Mono->stereo upmix for planar s16 input with float matrix coefficients.
; NOTE(review): fragmentary excerpt — s16<->float conversion, the multiplies,
; and loop control are in elided lines; comments cover only the shown code.
188 %macro MIX_1_TO_2_S16P_FLT 0
189 cglobal mix_1_to_2_s16p_flt, 3,5,6, src0, matrix0, len, src1, matrix1
; src1q = src[1], the output plane for the second channel
190 mov src1q, [src0q+gprsize]
; matrix1q = matrix[1], matrix0q = matrix[0] (matrix0q overwritten last,
; as it doubles as the matrix argument pointer)
193 mov matrix1q, [matrix0q+gprsize]
194 mov matrix0q, [matrix0q]
; m4 = splat(matrix[0][0]), m5 = splat(matrix[1][0])
195 VBROADCASTSS m4, [matrix0q]
196 VBROADCASTSS m5, [matrix1q]
; store channel-1 samples; src1q presumably rebased as an offset from src0q
; by elided setup code — TODO confirm
214 mova [src0q+src1q], m1
230 ;-----------------------------------------------------------------------------
231 ; void ff_mix_3_8_to_1_2_fltp/s16p_flt(float/int16_t **src, float **matrix,
232 ; int len, int out_ch, int in_ch);
233 ;-----------------------------------------------------------------------------
; Generic 3..8-channel -> 1 or 2 channel downmix, for fltp or s16p input.
; Expands to one cglobal function per (in_channels, out_channels, format)
; combination. Matrix coefficients are splatted once up front; as many as fit
; are kept in mm registers, the rest spilled to an aligned stack area.
; NOTE(review): this excerpt is heavily fragmentary — %else/%endif/%endrep
; pairs, the loop control, s16<->float conversion and %endmacro are in elided
; lines. Comments below describe only the visible instructions/directives.
235 %macro MIX_3_8_TO_1_2_FLT 3 ; %1 = in channels, %2 = out channels, %3 = s16p or fltp
236 ; define some names to make the code clearer
237 %assign in_channels %1
238 %assign out_channels %2
; stereo == 1 when out_channels == 2, 0 for mono output; used to gate the
; second accumulator/store path below
239 %assign stereo out_channels - 1
246 ; determine how many matrix elements must go on the stack vs. mmregs
247 %assign matrix_elements in_channels * out_channels
; needed_mmregs = scratch vector registers reserved for the mixing loop
; itself; the remaining registers (num_mmregs - needed_mmregs) hold matrix
; coefficients. The several %assign values correspond to different
; format/output-count cases selected by elided %if conditions.
250 %assign needed_mmregs 7
252 %assign needed_mmregs 5
256 %assign needed_mmregs 4
258 %assign needed_mmregs 3
261 %assign matrix_elements_mm num_mmregs - needed_mmregs
262 %if matrix_elements < matrix_elements_mm
263 %assign matrix_elements_mm matrix_elements
; matrix_elements_stack = coefficients that did not fit in mm registers and
; must be kept in an aligned stack buffer
265 %if matrix_elements_mm < matrix_elements
266 %assign matrix_elements_stack matrix_elements - matrix_elements_mm
268 %assign matrix_elements_stack 0
; one gp register per input channel pointer (src0..src7) plus matrix/len
271 cglobal mix_%1_to_%2_%3_flt, 3,in_channels+2,needed_mmregs+matrix_elements_mm, src0, src1, len, src2, src3, src4, src5, src6, src7
273 ; get aligned stack space if needed
274 %if matrix_elements_stack > 0
; bkpq: a spare gp register used to back up the original stack pointer while
; rsp is realigned (name built by token pasting: r<N>q)
276 %assign bkpreg %1 + 1
277 %define bkpq r %+ bkpreg %+ q
280 sub rsp, matrix_elements_stack * mmsize
; pad: bytes needed so the spill area is mmsize-aligned given the current
; stack_offset tracked by x86inc
282 %assign pad matrix_elements_stack * mmsize + (mmsize - gprsize) - (stack_offset & (mmsize - gprsize))
287 ; load matrix pointers
; src1q temporarily serves as matrix0q/matrix1q holder per the elided
; %define aliases — TODO confirm; matrix0q read last as it is the arg pointer
291 mov matrix1q, [matrix0q+gprsize]
293 mov matrix0q, [matrix0q]
295 ; define matrix coeff names
; mx_0_<i>/mx_1_<i> name coefficient i of output row 0/1; the companion
; mx_stack_0_<i>/mx_stack_1_<i> flag (1 = stack slot, 0 = mm register) tells
; later code whether an extra load is needed. %%j counts assigned mm regs.
297 %assign %%j needed_mmregs
299 %if %%i >= matrix_elements_mm
300 CAT_XDEFINE mx_stack_0_, %%i, 1
301 CAT_XDEFINE mx_0_, %%i, [rsp+(%%i-matrix_elements_mm)*mmsize]
303 CAT_XDEFINE mx_stack_0_, %%i, 0
304 CAT_XDEFINE mx_0_, %%i, m %+ %%j
; row-1 coefficients are indexed after all of row 0's (offset in_channels)
312 %if in_channels + %%i >= matrix_elements_mm
313 CAT_XDEFINE mx_stack_1_, %%i, 1
314 CAT_XDEFINE mx_1_, %%i, [rsp+(in_channels+%%i-matrix_elements_mm)*mmsize]
316 CAT_XDEFINE mx_stack_1_, %%i, 0
317 CAT_XDEFINE mx_1_, %%i, m %+ %%j
324 ; load/splat matrix coeffs
; stack-resident coefficients are splatted via m0 then stored; register-
; resident ones are broadcast directly into their final mm register
327 %if mx_stack_0_ %+ %%i
328 VBROADCASTSS m0, [matrix0q+4*%%i]
329 mova mx_0_ %+ %%i, m0
331 VBROADCASTSS mx_0_ %+ %%i, [matrix0q+4*%%i]
334 %if mx_stack_1_ %+ %%i
335 VBROADCASTSS m0, [matrix1q+4*%%i]
336 mova mx_1_ %+ %%i, m0
338 VBROADCASTSS mx_1_ %+ %%i, [matrix1q+4*%%i]
344 ; load channel pointers to registers as offsets from the first channel pointer
350 %rep (in_channels - 1)
; x86-32 with >=7 channels: not enough gp registers, so channels 5+ get
; their (pointer + len) value parked in a stack slot (src<i>m) via src5q
351 %if ARCH_X86_32 && in_channels >= 7 && %%i >= 5
352 mov src5q, [src0q+%%i*gprsize]
354 mov src %+ %%i %+ m, src5q
; common case: src<i>q = src[i] + len, i.e. an end-relative pointer so the
; loop can count lenq up toward zero — TODO confirm (loop control elided)
356 mov src %+ %%i %+ q, [src0q+%%i*gprsize]
357 add src %+ %%i %+ q, lenq
365 ; for x86-32 with 7-8 channels we do not have enough gp registers for all src
366 ; pointers, so we have to load some of them from the stack each time
367 %define copy_src_from_stack ARCH_X86_32 && in_channels >= 7 && %%i >= 5
369 ; mix with s16p input
; m0 seeds the accumulator with channel-0 samples (scaling by mx_0_0 is in
; elided lines); m0/m1 accumulate output 0, m2/m3 output 1 (stereo only)
370 mova m0, [src0q+lenq]
381 %rep (in_channels - 1)
382 %if copy_src_from_stack
383 %define src_ptr src5q
385 %define src_ptr src %+ %%i %+ q
388 %if copy_src_from_stack
; reload this channel's pointer from its stack slot (x86-32 spill case)
389 mov src_ptr, src %+ %%i %+ m
391 mova m4, [src_ptr+lenq]
; stereo path: accumulate channel i into both output rows; m6 is scratch
; for fmaddps emulation on non-FMA targets. Row-1 products must be computed
; before row 0 because the row-0 forms clobber m4/m5 (used as scratch).
395 fmaddps m2, m4, mx_1_ %+ %%i, m2, m6
396 fmaddps m3, m5, mx_1_ %+ %%i, m3, m6
397 fmaddps m0, m4, mx_0_ %+ %%i, m0, m4
398 fmaddps m1, m5, mx_0_ %+ %%i, m1, m5
; mono path (elided %else presumably separates it — TODO confirm)
400 %if copy_src_from_stack
401 mov src_ptr, src %+ %%i %+ m
403 mova m2, [src_ptr+lenq]
407 fmaddps m0, m2, mx_0_ %+ %%i, m0, m4
408 fmaddps m1, m3, mx_0_ %+ %%i, m1, m4
; repack to s16 (elided) and store: src1q+lenq = output channel 1,
; src0q+lenq = output channel 0 (in-place over the input planes)
416 mova [src1q+lenq], m2
421 mova [src0q+lenq], m0
423 ; mix with fltp input
; when the first coefficient lives on the stack (or output is stereo) the
; channel-0 vector must be loaded into a register first; otherwise a
; memory-operand mulps suffices (line 433)
424 %if stereo || mx_stack_0_0
425 mova m0, [src0q+lenq]
430 %if stereo || mx_stack_0_0
433 mulps m0, [src0q+lenq], mx_0_0
436 %rep (in_channels - 1)
437 %if copy_src_from_stack
438 %define src_ptr src5q
439 mov src_ptr, src %+ %%i %+ m
441 %define src_ptr src %+ %%i %+ q
443 ; avoid extra load for mono if matrix is in a mm register
444 %if stereo || mx_stack_0_ %+ %%i
445 mova m2, [src_ptr+lenq]
; m1 accumulates output channel 1 (stereo); m3 is non-FMA scratch
448 fmaddps m1, m2, mx_1_ %+ %%i, m1, m3
450 %if stereo || mx_stack_0_ %+ %%i
451 fmaddps m0, m2, mx_0_ %+ %%i, m0, m2
; mono + register-resident coeff: fold the load into the fmaddps directly
453 fmaddps m0, mx_0_ %+ %%i, [src_ptr+lenq], m0, m1
457 mova [src0q+lenq], m0
459 mova [src1q+lenq], m1
465 ; restore stack pointer
466 %if matrix_elements_stack > 0
473 ; zero ymm high halves
; Instantiate MIX_3_8_TO_1_2_FLT for every supported combination:
; 3..8 input channels (%%i, loop directives elided) x {1,2} output channels
; x {fltp,s16p}, repeated per SIMD flavor (the INIT_XMM/INIT_YMM sse/avx/fma4
; dispatch lines are in the elided gaps between the repeated groups).
480 %macro MIX_3_8_TO_1_2_FLT_FUNCS 0
484 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
485 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
487 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
488 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
490 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
491 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
492 ; do not use ymm AVX or FMA4 in x86-32 for 6 or more channels due to stack alignment issues
; the fltp ymm variants are gated: on x86-32 only up to 5 input channels
494 %if ARCH_X86_64 || %%i < 6
499 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
500 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
502 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
503 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; same gating for the next SIMD flavor (presumably FMA4 — TODO confirm,
; the INIT_* line is elided)
506 %if ARCH_X86_64 || %%i < 6
511 MIX_3_8_TO_1_2_FLT %%i, 1, fltp
512 MIX_3_8_TO_1_2_FLT %%i, 2, fltp
514 MIX_3_8_TO_1_2_FLT %%i, 1, s16p
515 MIX_3_8_TO_1_2_FLT %%i, 2, s16p
; top-level expansion: emits all mix_3_to_1 .. mix_8_to_2 functions
521 MIX_3_8_TO_1_2_FLT_FUNCS