From: Michael Niedermayer
Date: Fri, 14 Sep 2012 12:07:21 +0000 (+0200)
Subject: Merge remote-tracking branch 'qatar/master'
X-Git-Tag: n1.1-dev~246
X-Git-Url: http://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff_plain/509f502902e3760358d5c0c189f3d288652406e5

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  x86: dsputil: Move Xvid IDCT put/add functions to a more suitable place
  trasher: Include all the necessary headers
  x86: Remove some leftover declarations for non-existent functions
  ARM: libavresample: NEON optimised generic fltp to s16 conversion
  ARM: libavresample: NEON optimised stereo fltp to s16 conversion
  ARM: libavresample: NEON optimised flat float to s16 conversion

Conflicts:
    libavcodec/x86/dsputil_mmx.c

Merged-by: Michael Niedermayer
---

509f502902e3760358d5c0c189f3d288652406e5
diff --cc libavcodec/x86/dsputil_mmx.c
index 567d8be,121e711..c36a027
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@@ -2181,140 -2161,6 +2181,116 @@@ void ff_avg_vc1_mspel_mc00_mmx2(uint8_
     avg_pixels8_mmx2(dst, src, stride, 8);
 }

+/* only used in VP3/5/6 */
+static void put_vp_no_rnd_pixels8_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
+{
+//  START_TIMER
+    MOVQ_BFE(mm6);
+    __asm__ volatile(
+        "1: \n\t"
+        "movq (%1), %%mm0 \n\t"
+        "movq (%2), %%mm1 \n\t"
+        "movq (%1,%4), %%mm2 \n\t"
+        "movq (%2,%4), %%mm3 \n\t"
+        PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, (%3) \n\t"
+        "movq %%mm5, (%3,%4) \n\t"
+
+        "movq (%1,%4,2), %%mm0 \n\t"
+        "movq (%2,%4,2), %%mm1 \n\t"
+        "movq (%1,%5), %%mm2 \n\t"
+        "movq (%2,%5), %%mm3 \n\t"
+        "lea (%1,%4,4), %1 \n\t"
+        "lea (%2,%4,4), %2 \n\t"
+        PAVGBP_MMX_NO_RND(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+        "movq %%mm4, (%3,%4,2) \n\t"
+        "movq %%mm5, (%3,%5) \n\t"
+        "lea (%3,%4,4), %3 \n\t"
+        "subl $4, %0 \n\t"
+        "jnz 1b \n\t"
+        :"+r"(h), "+r"(a), "+r"(b), "+r"(dst)
+        :"r"((x86_reg)stride), "r"((x86_reg)3L*stride)
+        :"memory");
+//  STOP_TIMER("put_vp_no_rnd_pixels8_l2_mmx")
+}
+static void put_vp_no_rnd_pixels16_l2_mmx(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h)
+{
+    put_vp_no_rnd_pixels8_l2_mmx(dst, a, b, stride, h);
+    put_vp_no_rnd_pixels8_l2_mmx(dst+8, a+8, b+8, stride, h);
+}
+
+#if CONFIG_DIRAC_DECODER
+#define DIRAC_PIXOP(OPNAME, EXT)\
+void ff_ ## OPNAME ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+    OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
+}\
+void ff_ ## OPNAME ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+    OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
+}\
+void ff_ ## OPNAME ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
+{\
+    OPNAME ## _pixels16_ ## EXT(dst , src[0] , stride, h);\
+    OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
+}
+
+DIRAC_PIXOP(put, mmx)
+DIRAC_PIXOP(avg, mmx)
+DIRAC_PIXOP(avg, mmx2)
+
+void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+    put_pixels16_sse2(dst, src[0], stride, h);
+}
+void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+    avg_pixels16_sse2(dst, src[0], stride, h);
+}
+void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+    put_pixels16_sse2(dst , src[0] , stride, h);
+    put_pixels16_sse2(dst+16, src[0]+16, stride, h);
+}
+void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
+{
+    avg_pixels16_sse2(dst , src[0] , stride, h);
+    avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
+}
+#endif
+
+/* XXX: Those functions should be suppressed ASAP when all IDCTs are
+ * converted. */
+#if CONFIG_GPL
+static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size,
+                                    DCTELEM *block)
+{
+    ff_mmx_idct(block);
+    ff_put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size,
+                                    DCTELEM *block)
+{
+    ff_mmx_idct(block);
+    ff_add_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size,
+                                     DCTELEM *block)
+{
+    ff_mmxext_idct(block);
+    ff_put_pixels_clamped_mmx(block, dest, line_size);
+}
+
+static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size,
+                                     DCTELEM *block)
+{
+    ff_mmxext_idct(block);
+    ff_add_pixels_clamped_mmx(block, dest, line_size);
+}
+#endif
+
+
- static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
-     ff_idct_xvid_mmx(block);
-     ff_put_pixels_clamped_mmx(block, dest, line_size);
- }
-
- static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
-     ff_idct_xvid_mmx(block);
-     ff_add_pixels_clamped_mmx(block, dest, line_size);
- }
-
- static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
- {
-     ff_idct_xvid_mmx2(block);
-     ff_put_pixels_clamped_mmx(block, dest, line_size);
- }
-
- static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
- {
-     ff_idct_xvid_mmx2(block);
-     ff_add_pixels_clamped_mmx(block, dest, line_size);
- }
-
 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
 {
     int i;
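For readers skimming the x86 hunk above: put_vp_no_rnd_pixels8_l2_mmx accelerates the "no rounding" two-source average used by VP3/5/6 half-pel interpolation. The following scalar sketch is illustrative only (the helper name put_no_rnd_pixels8_l2_c and its placement are hypothetical, not part of this patch); it shows the per-byte operation the PAVGBP_MMX_NO_RND macro computes for 8-pixel-wide rows.

    /* Scalar sketch: average two 8-pixel-wide blocks WITHOUT the usual +1
     * rounding bias, i.e. dst = (a + b) >> 1 per byte. Illustrative only. */
    #include <stdint.h>

    static void put_no_rnd_pixels8_l2_c(uint8_t *dst, const uint8_t *a,
                                        const uint8_t *b, int stride, int h)
    {
        int x, y;
        for (y = 0; y < h; y++) {
            for (x = 0; x < 8; x++)
                dst[x] = (a[x] + b[x]) >> 1;   /* truncating, not rounded */
            dst += stride;
            a   += stride;
            b   += stride;
        }
    }
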
diff --cc libavresample/arm/audio_convert_init.c
index 0000000,bbb7bae..3d19a0e
mode 000000,100644..100644
--- a/libavresample/arm/audio_convert_init.c
+++ b/libavresample/arm/audio_convert_init.c
@@@ -1,0 -1,49 +1,49 @@@
+ /*
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include <stdint.h>
+
+ #include "config.h"
+ #include "libavutil/attributes.h"
+ #include "libavutil/cpu.h"
+ #include "libavutil/arm/cpu.h"
+ #include "libavutil/samplefmt.h"
+ #include "libavresample/audio_convert.h"
+
+ void ff_conv_flt_to_s16_neon(int16_t *dst, const float *src, int len);
+ void ff_conv_fltp_to_s16_neon(int16_t *dst, float *const *src,
+                               int len, int channels);
+ void ff_conv_fltp_to_s16_2ch_neon(int16_t *dst, float *const *src,
+                                   int len, int channels);
+
+ av_cold void ff_audio_convert_init_arm(AudioConvert *ac)
+ {
+     int cpu_flags = av_get_cpu_flags();
+
+     if (have_neon(cpu_flags)) {
+         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT,
+                                   0, 16, 8, "NEON",
+                                   ff_conv_flt_to_s16_neon);
+         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
+                                   0, 16, 8, "NEON",
+                                   ff_conv_fltp_to_s16_neon);
+         ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLTP,
+                                   2, 16, 8, "NEON",
+                                   ff_conv_fltp_to_s16_2ch_neon);
+     }
+ }
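The three NEON routines registered above all perform the same sample-format conversion: each float in [-1.0, 1.0) becomes a signed 16-bit integer. The scalar sketch below is a rough reference, not a quotation of the FFmpeg generic path; the NEON code in the next file reaches the same result by converting to Q31 fixed point (vcvt ... #31) and then narrowing with a rounding, saturating right shift by 16 (vqrshrn ... #16), so rounding of the last bit can differ slightly.

    /* Scalar sketch of flt -> s16: scale by 2^15, round, saturate to the
     * int16_t range. Function name conv_flt_to_s16_c is illustrative only. */
    #include <stdint.h>
    #include <math.h>

    static void conv_flt_to_s16_c(int16_t *dst, const float *src, int len)
    {
        int i;
        for (i = 0; i < len; i++) {
            long v = lrintf(src[i] * 32768.0f);
            if (v >  32767) v =  32767;
            if (v < -32768) v = -32768;
            dst[i] = (int16_t)v;
        }
    }
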
diff --cc libavresample/arm/audio_convert_neon.S
index 0000000,092ce0c..98f77f0
mode 000000,100644..100644
--- a/libavresample/arm/audio_convert_neon.S
+++ b/libavresample/arm/audio_convert_neon.S
@@@ -1,0 -1,363 +1,363 @@@
+ /*
+ * Copyright (c) 2008 Mans Rullgard
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include "config.h"
+ #include "libavutil/arm/asm.S"
+
+ function ff_conv_flt_to_s16_neon, export=1
+         subs r2, r2, #8
+         vld1.32 {q0}, [r1,:128]!
+         vcvt.s32.f32 q8, q0, #31
+         vld1.32 {q1}, [r1,:128]!
+         vcvt.s32.f32 q9, q1, #31
+         beq 3f
+         bics r12, r2, #15
+         beq 2f
+ 1:      subs r12, r12, #16
+         vqrshrn.s32 d4, q8, #16
+         vld1.32 {q0}, [r1,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vqrshrn.s32 d5, q9, #16
+         vld1.32 {q1}, [r1,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         vqrshrn.s32 d6, q0, #16
+         vst1.16 {q2}, [r0,:128]!
+         vqrshrn.s32 d7, q1, #16
+         vld1.32 {q8}, [r1,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vld1.32 {q9}, [r1,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vst1.16 {q3}, [r0,:128]!
+         bne 1b
+         ands r2, r2, #15
+         beq 3f
+ 2:      vld1.32 {q0}, [r1,:128]!
+         vqrshrn.s32 d4, q8, #16
+         vcvt.s32.f32 q0, q0, #31
+         vld1.32 {q1}, [r1,:128]!
+         vqrshrn.s32 d5, q9, #16
+         vcvt.s32.f32 q1, q1, #31
+         vqrshrn.s32 d6, q0, #16
+         vst1.16 {q2}, [r0,:128]!
+         vqrshrn.s32 d7, q1, #16
+         vst1.16 {q3}, [r0,:128]!
+         bx lr
+ 3:      vqrshrn.s32 d4, q8, #16
+         vqrshrn.s32 d5, q9, #16
+         vst1.16 {q2}, [r0,:128]!
+         bx lr
+ endfunc
+
+ function ff_conv_fltp_to_s16_2ch_neon, export=1
+         ldm r1, {r1, r3}
+         subs r2, r2, #8
+         vld1.32 {q0}, [r1,:128]!
+         vcvt.s32.f32 q8, q0, #31
+         vld1.32 {q1}, [r1,:128]!
+         vcvt.s32.f32 q9, q1, #31
+         vld1.32 {q10}, [r3,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vld1.32 {q11}, [r3,:128]!
+         vcvt.s32.f32 q11, q11, #31
+         beq 3f
+         bics r12, r2, #15
+         beq 2f
+ 1:      subs r12, r12, #16
+         vld1.32 {q0}, [r1,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vsri.32 q10, q8, #16
+         vld1.32 {q1}, [r1,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         vld1.32 {q12}, [r3,:128]!
+         vcvt.s32.f32 q12, q12, #31
+         vld1.32 {q13}, [r3,:128]!
+         vsri.32 q11, q9, #16
+         vst1.16 {q10}, [r0,:128]!
+         vcvt.s32.f32 q13, q13, #31
+         vst1.16 {q11}, [r0,:128]!
+         vsri.32 q12, q0, #16
+         vld1.32 {q8}, [r1,:128]!
+         vsri.32 q13, q1, #16
+         vst1.16 {q12}, [r0,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vld1.32 {q9}, [r1,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vld1.32 {q10}, [r3,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vld1.32 {q11}, [r3,:128]!
+         vcvt.s32.f32 q11, q11, #31
+         vst1.16 {q13}, [r0,:128]!
+         bne 1b
+         ands r2, r2, #15
+         beq 3f
+ 2:      vsri.32 q10, q8, #16
+         vld1.32 {q0}, [r1,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vld1.32 {q1}, [r1,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         vld1.32 {q12}, [r3,:128]!
+         vcvt.s32.f32 q12, q12, #31
+         vsri.32 q11, q9, #16
+         vld1.32 {q13}, [r3,:128]!
+         vcvt.s32.f32 q13, q13, #31
+         vst1.16 {q10}, [r0,:128]!
+         vsri.32 q12, q0, #16
+         vst1.16 {q11}, [r0,:128]!
+         vsri.32 q13, q1, #16
+         vst1.16 {q12-q13},[r0,:128]!
+         bx lr
+ 3:      vsri.32 q10, q8, #16
+         vsri.32 q11, q9, #16
+         vst1.16 {q10-q11},[r0,:128]!
+         bx lr
+ endfunc
+
+ function ff_conv_fltp_to_s16_neon, export=1
+         cmp r3, #2
+         itt lt
+         ldrlt r1, [r1]
+         blt ff_conv_flt_to_s16_neon
+         beq ff_conv_fltp_to_s16_2ch_neon
+
+         push {r4-r8, lr}
+         cmp r3, #4
+         lsl r12, r3, #1
+         blt 4f
+
+         @ 4 channels
+ 5:      ldm r1!, {r4-r7}
+         mov lr, r2
+         mov r8, r0
+         vld1.32 {q8}, [r4,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vld1.32 {q9}, [r5,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vld1.32 {q10}, [r6,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vld1.32 {q11}, [r7,:128]!
+         vcvt.s32.f32 q11, q11, #31
+ 6:      subs lr, lr, #8
+         vld1.32 {q0}, [r4,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vsri.32 q9, q8, #16
+         vld1.32 {q1}, [r5,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         vsri.32 q11, q10, #16
+         vld1.32 {q2}, [r6,:128]!
+         vcvt.s32.f32 q2, q2, #31
+         vzip.32 d18, d22
+         vld1.32 {q3}, [r7,:128]!
+         vcvt.s32.f32 q3, q3, #31
+         vzip.32 d19, d23
+         vst1.16 {d18}, [r8], r12
+         vsri.32 q1, q0, #16
+         vst1.16 {d22}, [r8], r12
+         vsri.32 q3, q2, #16
+         vst1.16 {d19}, [r8], r12
+         vzip.32 d2, d6
+         vst1.16 {d23}, [r8], r12
+         vzip.32 d3, d7
+         beq 7f
+         vld1.32 {q8}, [r4,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vst1.16 {d2}, [r8], r12
+         vld1.32 {q9}, [r5,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vst1.16 {d6}, [r8], r12
+         vld1.32 {q10}, [r6,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vst1.16 {d3}, [r8], r12
+         vld1.32 {q11}, [r7,:128]!
+         vcvt.s32.f32 q11, q11, #31
+         vst1.16 {d7}, [r8], r12
+         b 6b
+ 7:      vst1.16 {d2}, [r8], r12
+         vst1.16 {d6}, [r8], r12
+         vst1.16 {d3}, [r8], r12
+         vst1.16 {d7}, [r8], r12
+         subs r3, r3, #4
+         it eq
+         popeq {r4-r8, pc}
+         cmp r3, #4
+         add r0, r0, #8
+         bge 5b
+
+         @ 2 channels
+ 4:      cmp r3, #2
+         blt 4f
+         ldm r1!, {r4-r5}
+         mov lr, r2
+         mov r8, r0
+         tst lr, #8
+         vld1.32 {q8}, [r4,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vld1.32 {q9}, [r5,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vld1.32 {q10}, [r4,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vld1.32 {q11}, [r5,:128]!
+         vcvt.s32.f32 q11, q11, #31
+         beq 6f
+         subs lr, lr, #8
+         beq 7f
+         vsri.32 d18, d16, #16
+         vsri.32 d19, d17, #16
+         vld1.32 {q8}, [r4,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vst1.32 {d18[0]}, [r8], r12
+         vsri.32 d22, d20, #16
+         vst1.32 {d18[1]}, [r8], r12
+         vsri.32 d23, d21, #16
+         vst1.32 {d19[0]}, [r8], r12
+         vst1.32 {d19[1]}, [r8], r12
+         vld1.32 {q9}, [r5,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vst1.32 {d22[0]}, [r8], r12
+         vst1.32 {d22[1]}, [r8], r12
+         vld1.32 {q10}, [r4,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vst1.32 {d23[0]}, [r8], r12
+         vst1.32 {d23[1]}, [r8], r12
+         vld1.32 {q11}, [r5,:128]!
+         vcvt.s32.f32 q11, q11, #31
+ 6:      subs lr, lr, #16
+         vld1.32 {q0}, [r4,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vsri.32 d18, d16, #16
+         vld1.32 {q1}, [r5,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         vsri.32 d19, d17, #16
+         vld1.32 {q2}, [r4,:128]!
+         vcvt.s32.f32 q2, q2, #31
+         vld1.32 {q3}, [r5,:128]!
+         vcvt.s32.f32 q3, q3, #31
+         vst1.32 {d18[0]}, [r8], r12
+         vsri.32 d22, d20, #16
+         vst1.32 {d18[1]}, [r8], r12
+         vsri.32 d23, d21, #16
+         vst1.32 {d19[0]}, [r8], r12
+         vsri.32 d2, d0, #16
+         vst1.32 {d19[1]}, [r8], r12
+         vsri.32 d3, d1, #16
+         vst1.32 {d22[0]}, [r8], r12
+         vsri.32 d6, d4, #16
+         vst1.32 {d22[1]}, [r8], r12
+         vsri.32 d7, d5, #16
+         vst1.32 {d23[0]}, [r8], r12
+         vst1.32 {d23[1]}, [r8], r12
+         beq 6f
+         vld1.32 {q8}, [r4,:128]!
+         vcvt.s32.f32 q8, q8, #31
+         vst1.32 {d2[0]}, [r8], r12
+         vst1.32 {d2[1]}, [r8], r12
+         vld1.32 {q9}, [r5,:128]!
+         vcvt.s32.f32 q9, q9, #31
+         vst1.32 {d3[0]}, [r8], r12
+         vst1.32 {d3[1]}, [r8], r12
+         vld1.32 {q10}, [r4,:128]!
+         vcvt.s32.f32 q10, q10, #31
+         vst1.32 {d6[0]}, [r8], r12
+         vst1.32 {d6[1]}, [r8], r12
+         vld1.32 {q11}, [r5,:128]!
+         vcvt.s32.f32 q11, q11, #31
+         vst1.32 {d7[0]}, [r8], r12
+         vst1.32 {d7[1]}, [r8], r12
+         bgt 6b
+ 6:      vst1.32 {d2[0]}, [r8], r12
+         vst1.32 {d2[1]}, [r8], r12
+         vst1.32 {d3[0]}, [r8], r12
+         vst1.32 {d3[1]}, [r8], r12
+         vst1.32 {d6[0]}, [r8], r12
+         vst1.32 {d6[1]}, [r8], r12
+         vst1.32 {d7[0]}, [r8], r12
+         vst1.32 {d7[1]}, [r8], r12
+         b 8f
+ 7:      vsri.32 d18, d16, #16
+         vsri.32 d19, d17, #16
+         vst1.32 {d18[0]}, [r8], r12
+         vsri.32 d22, d20, #16
+         vst1.32 {d18[1]}, [r8], r12
+         vsri.32 d23, d21, #16
+         vst1.32 {d19[0]}, [r8], r12
+         vst1.32 {d19[1]}, [r8], r12
+         vst1.32 {d22[0]}, [r8], r12
+         vst1.32 {d22[1]}, [r8], r12
+         vst1.32 {d23[0]}, [r8], r12
+         vst1.32 {d23[1]}, [r8], r12
+ 8:      subs r3, r3, #2
+         add r0, r0, #4
+         it eq
+         popeq {r4-r8, pc}
+
+         @ 1 channel
+ 4:      ldr r4, [r1]
+         tst r2, #8
+         mov lr, r2
+         mov r5, r0
+         vld1.32 {q0}, [r4,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vld1.32 {q1}, [r4,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         bne 8f
+ 6:      subs lr, lr, #16
+         vld1.32 {q2}, [r4,:128]!
+         vcvt.s32.f32 q2, q2, #31
+         vld1.32 {q3}, [r4,:128]!
+         vcvt.s32.f32 q3, q3, #31
+         vst1.16 {d0[1]}, [r5,:16], r12
+         vst1.16 {d0[3]}, [r5,:16], r12
+         vst1.16 {d1[1]}, [r5,:16], r12
+         vst1.16 {d1[3]}, [r5,:16], r12
+         vst1.16 {d2[1]}, [r5,:16], r12
+         vst1.16 {d2[3]}, [r5,:16], r12
+         vst1.16 {d3[1]}, [r5,:16], r12
+         vst1.16 {d3[3]}, [r5,:16], r12
+         beq 7f
+         vld1.32 {q0}, [r4,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vld1.32 {q1}, [r4,:128]!
+         vcvt.s32.f32 q1, q1, #31
+ 7:      vst1.16 {d4[1]}, [r5,:16], r12
+         vst1.16 {d4[3]}, [r5,:16], r12
+         vst1.16 {d5[1]}, [r5,:16], r12
+         vst1.16 {d5[3]}, [r5,:16], r12
+         vst1.16 {d6[1]}, [r5,:16], r12
+         vst1.16 {d6[3]}, [r5,:16], r12
+         vst1.16 {d7[1]}, [r5,:16], r12
+         vst1.16 {d7[3]}, [r5,:16], r12
+         bgt 6b
+         pop {r4-r8, pc}
+ 8:      subs lr, lr, #8
+         vst1.16 {d0[1]}, [r5,:16], r12
+         vst1.16 {d0[3]}, [r5,:16], r12
+         vst1.16 {d1[1]}, [r5,:16], r12
+         vst1.16 {d1[3]}, [r5,:16], r12
+         vst1.16 {d2[1]}, [r5,:16], r12
+         vst1.16 {d2[3]}, [r5,:16], r12
+         vst1.16 {d3[1]}, [r5,:16], r12
+         vst1.16 {d3[3]}, [r5,:16], r12
+         it eq
+         popeq {r4-r8, pc}
+         vld1.32 {q0}, [r4,:128]!
+         vcvt.s32.f32 q0, q0, #31
+         vld1.32 {q1}, [r4,:128]!
+         vcvt.s32.f32 q1, q1, #31
+         b 6b
+ endfunc
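For completeness, the planar entry point ff_conv_fltp_to_s16_neon above also interleaves the converted channels, dispatching to the 1- and 2-channel paths for small channel counts and unrolling 4-, 2- and 1-channel loops otherwise. A scalar sketch of its contract (names illustrative, not the FFmpeg source) follows; the strided stores through r12 = 2 * channels in the assembly correspond to the interleaved dst index below.

    /* Scalar sketch of fltp -> interleaved s16: convert each plane and
     * interleave frame by frame. Illustrative only. */
    #include <stdint.h>
    #include <math.h>

    static void conv_fltp_to_s16_c(int16_t *dst, float *const *src,
                                   int len, int channels)
    {
        int i, ch;
        for (i = 0; i < len; i++)
            for (ch = 0; ch < channels; ch++) {
                long v = lrintf(src[ch][i] * 32768.0f);
                if (v >  32767) v =  32767;
                if (v < -32768) v = -32768;
                dst[i * channels + ch] = (int16_t)v;
            }
    }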