swr: aarch64 audio_convert and neon clobber test
author: Michael Niedermayer <michaelni@gmx.at>
Sun, 24 Aug 2014 15:09:45 +0000 (17:09 +0200)
committer: Michael Niedermayer <michaelni@gmx.at>
Wed, 27 Aug 2014 18:06:37 +0000 (20:06 +0200)
Ported from avresample
Code by: Mans Rullgard, Janne Grunau, Martin Storsjo

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
libswresample/aarch64/Makefile [new file with mode: 0644]
libswresample/aarch64/audio_convert_init.c [new file with mode: 0644]
libswresample/aarch64/audio_convert_neon.S [new file with mode: 0644]
libswresample/aarch64/neontest.c [new file with mode: 0644]
libswresample/audioconvert.c
libswresample/swresample_internal.h

diff --git a/libswresample/aarch64/Makefile b/libswresample/aarch64/Makefile
new file mode 100644 (file)
index 0000000..320ed67
--- /dev/null
@@ -0,0 +1,5 @@
+OBJS                             += aarch64/audio_convert_init.o
+
+OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
+
+NEON-OBJS                        += aarch64/audio_convert_neon.o
diff --git a/libswresample/aarch64/audio_convert_init.c b/libswresample/aarch64/audio_convert_init.c
new file mode 100644 (file)
index 0000000..60e24ad
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * This file is part of libswresample.
+ *
+ * libswresample is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * libswresample is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with libswresample; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavutil/samplefmt.h"
+#include "libswresample/swresample_internal.h"
+#include "libswresample/audioconvert.h"
+
+void swri_oldapi_conv_flt_to_s16_neon(int16_t *dst, const float *src, int len);
+void swri_oldapi_conv_fltp_to_s16_2ch_neon(int16_t *dst, float *const *src, int len, int channels);
+void swri_oldapi_conv_fltp_to_s16_nch_neon(int16_t *dst, float *const *src, int len, int channels);
+
+static void conv_flt_to_s16_neon(uint8_t **dst, const uint8_t **src, int len){ /* new-API shim: interleaved float -> s16 via the NEON kernel */
+    swri_oldapi_conv_flt_to_s16_neon((int16_t*)*dst, (const float*)*src, len); /* uses only plane 0 of dst/src */
+}
+
+static void conv_fltp_to_s16_2ch_neon(uint8_t **dst, const uint8_t **src, int len){ /* planar float, 2 channels -> interleaved s16 */
+    swri_oldapi_conv_fltp_to_s16_2ch_neon((int16_t*)*dst, (float *const*)src, len, 2); /* passes the whole plane-pointer array through */
+}
+
+static void conv_fltp_to_s16_nch_neon(uint8_t **dst, const uint8_t **src, int len){ /* planar float, >2 channels -> interleaved s16 */
+    int channels;
+    for(channels=3; channels<SWR_CH_MAX && src[channels]; channels++) /* count non-NULL planes; starts at 3 since this path is only selected for channels > 2 */
+        ;
+    swri_oldapi_conv_fltp_to_s16_nch_neon((int16_t*)*dst, (float *const*)src, len, channels);
+}
+
+av_cold void swri_audio_convert_init_aarch64(struct AudioConvert *ac,
+                                       enum AVSampleFormat out_fmt,
+                                       enum AVSampleFormat in_fmt,
+                                       int channels)
+{ /* select a NEON sample-format converter for this (in_fmt, out_fmt, channels) combination, if one exists */
+    int cpu_flags = av_get_cpu_flags();
+
+    ac->simd_f= NULL; /* default: no SIMD path, generic C conversion is used */
+
+    if (have_neon(cpu_flags)) {
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLT || out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_FLTP) /* && binds tighter than ||: (flt->s16) or (fltp->s16p) */
+            ac->simd_f = conv_flt_to_s16_neon; /* fltp->s16p presumably runs this flat kernel once per plane — confirm against audioconvert.c */
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLTP && channels == 2)
+            ac->simd_f = conv_fltp_to_s16_2ch_neon;
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLTP && channels >  2)
+            ac->simd_f = conv_fltp_to_s16_nch_neon;
+        if(ac->simd_f)
+            ac->in_simd_align_mask = ac->out_simd_align_mask = 15; /* request 16-byte aligned in/out buffers for the NEON kernels */
+    }
+}
diff --git a/libswresample/aarch64/audio_convert_neon.S b/libswresample/aarch64/audio_convert_neon.S
new file mode 100644 (file)
index 0000000..74feff4
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/aarch64/asm.S"
+
+function swri_oldapi_conv_flt_to_s16_neon, export=1 // x0=dst (s16), x1=src (float), x2=len — len presumably a multiple of 8; TODO confirm caller contract
+        subs            x2,  x2,  #8             // prime the pipeline with the first 8 samples
+        ld1             {v0.4s}, [x1],  #16
+        fcvtzs          v4.4s,  v0.4s,  #31      // float -> Q31 fixed point, saturating
+        ld1             {v1.4s}, [x1],  #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        b.eq            3f                       // len == 8: just flush the primed batch
+        ands            x12, x2,  #~15           // x12 = remaining samples rounded down to a multiple of 16
+        b.eq            2f
+1:      subs            x12, x12, #16            // unrolled main loop: 16 samples per iteration
+        sqrshrn         v4.4h,  v4.4s,  #16      // Q31 -> s16: narrow with rounding and saturation
+        ld1             {v2.4s}, [x1],  #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        sqrshrn2        v4.8h,  v5.4s,  #16      // upper half of the 8 x s16 result
+        ld1             {v3.4s}, [x1],  #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        sqrshrn         v6.4h,  v6.4s,  #16
+        st1             {v4.8h}, [x0],  #16
+        sqrshrn2        v6.8h,  v7.4s,  #16
+        ld1             {v0.4s}, [x1],  #16      // preload next iteration's inputs
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s}, [x1],  #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        st1             {v6.8h}, [x0],  #16
+        b.ne            1b
+        ands            x2,  x2,  #15            // 8-sample tail left over?
+        b.eq            3f
+2:      ld1             {v2.4s}, [x1],  #16      // 16-sample tail: 8 already converted + 8 more
+        sqrshrn         v4.4h,  v4.4s,  #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s}, [x1],  #16
+        sqrshrn2        v4.8h,  v5.4s,  #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        sqrshrn         v6.4h,  v6.4s,  #16
+        st1             {v4.8h}, [x0],  #16
+        sqrshrn2        v6.8h,  v7.4s,  #16
+        st1             {v6.8h}, [x0]
+        ret
+3:      sqrshrn         v4.4h,  v4.4s,  #16      // 8-sample tail
+        sqrshrn2        v4.8h,  v5.4s,  #16
+        st1             {v4.8h}, [x0]
+        ret
+endfunc
+
+function swri_oldapi_conv_fltp_to_s16_2ch_neon, export=1 // x0=dst (interleaved s16), x1=array of 2 plane ptrs, x2=frames per channel
+        ldp             x4,  x5,  [x1]           // x4 = channel 0 plane, x5 = channel 1 plane
+        subs            x2,  x2,  #8             // prime with 8 frames
+        ld1             {v0.4s},  [x4], #16
+        fcvtzs          v4.4s,  v0.4s,  #31      // float -> Q31 fixed point, saturating
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        b.eq            3f
+        ands            x12, x2,  #~15           // x12 = remaining frames rounded down to a multiple of 16
+        b.eq            2f
+1:      subs            x12, x12, #16            // main loop: 16 frames per iteration
+        ld1             {v16.4s}, [x4], #16
+        fcvtzs          v20.4s, v16.4s, #31
+        sri             v6.4s,  v4.4s,  #16      // pack per 32-bit lane: high 16 = ch1 s16, low 16 = ch0 s16 (truncates, unlike the rounding flat converter)
+        ld1             {v17.4s}, [x4], #16
+        fcvtzs          v21.4s, v17.4s, #31
+        ld1             {v18.4s}, [x5], #16
+        fcvtzs          v22.4s, v18.4s, #31
+        ld1             {v19.4s}, [x5], #16
+        sri             v7.4s,  v5.4s,  #16
+        st1             {v6.4s},  [x0], #16      // store 4 interleaved ch0/ch1 pairs
+        fcvtzs          v23.4s, v19.4s, #31
+        st1             {v7.4s},  [x0], #16
+        sri             v22.4s, v20.4s, #16
+        ld1             {v0.4s},  [x4], #16      // preload next iteration
+        sri             v23.4s, v21.4s, #16
+        st1             {v22.4s}, [x0], #16
+        fcvtzs          v4.4s,  v0.4s,  #31
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v5.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v6.4s,  v2.4s,  #31
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v7.4s,  v3.4s,  #31
+        st1             {v23.4s}, [x0], #16
+        b.ne            1b
+        ands            x2,  x2,  #15            // 8-frame tail left over?
+        b.eq            3f
+2:      sri             v6.4s,  v4.4s,  #16      // 16-frame tail: 8 already converted + 8 more
+        ld1             {v0.4s},  [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},  [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        ld1             {v2.4s},  [x5], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        sri             v7.4s,  v5.4s,  #16
+        ld1             {v3.4s},  [x5], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        sri             v2.4s,  v0.4s,  #16
+        st1             {v6.4s,v7.4s},  [x0], #32
+        sri             v3.4s,  v1.4s,  #16
+        st1             {v2.4s,v3.4s},  [x0], #32
+        ret
+3:      sri             v6.4s,  v4.4s,  #16      // 8-frame tail
+        sri             v7.4s,  v5.4s,  #16
+        st1             {v6.4s,v7.4s},  [x0]
+        ret
+endfunc
+
+function swri_oldapi_conv_fltp_to_s16_nch_neon, export=1 // x0=dst (interleaved s16), x1=plane ptr array, w2=frames per channel, w3=channel count
+        cmp             w3,  #2
+        b.eq            X(swri_oldapi_conv_fltp_to_s16_2ch_neon) // tail-call the 2-channel specialisation
+        b.gt            1f
+        ldr             x1,  [x1]                // mono: planar and flat layouts coincide, use the flat kernel
+        b               X(swri_oldapi_conv_flt_to_s16_neon)
+1:
+        cmp             w3,  #4
+        lsl             x12, x3,  #1             // x12 = output frame stride in bytes (channels * sizeof(int16_t))
+        b.lt            4f
+
+5:      // 4 channels at a time; channels are peeled off in groups of 4, then 2, then 1
+        ldp             x4, x5, [x1], #16        // 4 plane pointers; x1 advances to the next channel group
+        ldp             x6, x7, [x1], #16
+        mov             w9,  w2                  // w9 = per-channel frame counter for this group
+        mov             x8,  x0                  // x8 = strided write cursor for this group
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31      // float -> Q31 fixed point, saturating
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        ld1             {v6.4s},        [x6], #16
+        fcvtzs          v6.4s, v6.4s, #31
+        ld1             {v7.4s},        [x7], #16
+        fcvtzs          v7.4s, v7.4s, #31
+6:
+        subs            w9,  w9,  #8
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        sri             v5.4s,  v4.4s,  #16      // pack ch0/ch1 into 32-bit lanes: high 16 = ch1 s16, low 16 = ch0 s16 (truncating)
+        ld1             {v1.4s},        [x5], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        sri             v7.4s,  v6.4s,  #16      // pack ch2/ch3 likewise
+        ld1             {v2.4s},        [x6], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        zip1            v16.4s, v5.4s,  v7.4s    // interleave pair-lanes into complete 4-channel frames
+        ld1             {v3.4s},        [x7], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        zip2            v17.4s, v5.4s,  v7.4s
+        st1             {v16.d}[0],     [x8], x12 // one 8-byte frame (4 x s16) per store, strided by the full frame size
+        sri             v1.4s,  v0.4s,  #16
+        st1             {v16.d}[1],     [x8], x12
+        sri             v3.4s,  v2.4s,  #16
+        st1             {v17.d}[0],     [x8], x12
+        zip1            v18.4s, v1.4s,  v3.4s
+        st1             {v17.d}[1],     [x8], x12
+        zip2            v19.4s, v1.4s,  v3.4s
+        b.eq            7f                       // no frames left: flush the second batch and move on
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v18.d}[0],     [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v18.d}[1],     [x8], x12
+        ld1             {v6.4s},    [x6], #16
+        fcvtzs          v6.4s, v6.4s, #31
+        st1             {v19.d}[0],     [x8], x12
+        ld1             {v7.4s},    [x7], #16
+        fcvtzs          v7.4s, v7.4s, #31
+        st1             {v19.d}[1],     [x8], x12
+        b               6b
+7:
+        st1             {v18.d}[0],     [x8], x12
+        st1             {v18.d}[1],     [x8], x12
+        st1             {v19.d}[0],     [x8], x12
+        st1             {v19.d}[1],     [x8], x12
+        subs            w3,  w3,  #4             // 4 channels done; any channels left?
+        b.eq            end
+        cmp             w3,  #4
+        add             x0,  x0,  #8             // advance dst past the 4 channels just written within each frame
+        b.ge            5b
+
+4:      // 2 channels (remaining pair within each output frame)
+        cmp             w3,  #2
+        b.lt            4f
+        ldp             x4,  x5,  [x1], #16
+        mov             w9,  w2
+        mov             x8,  x0
+        tst             w9,  #8                  // odd 8-frame chunk present? (len % 16)
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        ld1             {v6.4s},        [x4], #16
+        fcvtzs          v6.4s,  v6.4s,  #31
+        ld1             {v7.4s},        [x5], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+        b.eq            6f                       // len multiple of 16: straight into the main loop
+        subs            w9,  w9,  #8
+        b.eq            7f                       // exactly 8 frames: flush and finish
+        sri             v5.4s,  v4.4s,  #16      // pack ch0/ch1 pairs (truncating)
+        ld1             {v4.4s},        [x4], #16
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v5.s}[0],      [x8], x12 // one 32-bit store = this pair's two s16 samples of a frame
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[1],      [x8], x12
+        ld1             {v6.4s},        [x4], #16
+        fcvtzs          v6.4s,  v6.4s, #31
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        st1             {v7.s}[0],      [x8], x12
+        st1             {v7.s}[1],      [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v7.s}[2],      [x8], x12
+        st1             {v7.s}[3],      [x8], x12
+        ld1             {v7.4s},        [x5], #16
+        fcvtzs          v7.4s,  v7.4s,  #31
+6:
+        subs            w9,  w9,  #16            // main loop: 16 frames per iteration
+        ld1             {v0.4s},        [x4], #16
+        sri             v5.4s,  v4.4s,  #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x5], #16
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[0],      [x8], x12
+        st1             {v5.s}[1],      [x8], x12
+        fcvtzs          v1.4s,  v1.4s,  #31
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        ld1             {v2.4s},        [x4], #16
+        st1             {v7.s}[0],      [x8], x12
+        fcvtzs          v2.4s,  v2.4s,  #31
+        st1             {v7.s}[1],      [x8], x12
+        ld1             {v3.4s},        [x5], #16
+        st1             {v7.s}[2],      [x8], x12
+        fcvtzs          v3.4s,  v3.4s,  #31
+        st1             {v7.s}[3],      [x8], x12
+        sri             v1.4s,  v0.4s,  #16
+        sri             v3.4s,  v2.4s,  #16
+        b.eq            6f                       // done: flush the last packed batch
+        ld1             {v4.4s},        [x4], #16
+        st1             {v1.s}[0],      [x8], x12
+        fcvtzs          v4.4s,  v4.4s,  #31
+        st1             {v1.s}[1],      [x8], x12
+        ld1             {v5.4s},        [x5], #16
+        st1             {v1.s}[2],      [x8], x12
+        fcvtzs          v5.4s,  v5.4s,  #31
+        st1             {v1.s}[3],      [x8], x12
+        ld1             {v6.4s},        [x4], #16
+        st1             {v3.s}[0],      [x8], x12
+        fcvtzs          v6.4s,  v6.4s,  #31
+        st1             {v3.s}[1],      [x8], x12
+        ld1             {v7.4s},        [x5], #16
+        st1             {v3.s}[2],      [x8], x12
+        fcvtzs          v7.4s,  v7.4s,  #31
+        st1             {v3.s}[3],      [x8], x12
+        b.gt            6b
+6:
+        st1             {v1.s}[0],      [x8], x12
+        st1             {v1.s}[1],      [x8], x12
+        st1             {v1.s}[2],      [x8], x12
+        st1             {v1.s}[3],      [x8], x12
+        st1             {v3.s}[0],      [x8], x12
+        st1             {v3.s}[1],      [x8], x12
+        st1             {v3.s}[2],      [x8], x12
+        st1             {v3.s}[3],      [x8], x12
+        b               8f
+7:      // 8-frame-only tail for the 2-channel group
+        sri             v5.4s,  v4.4s,  #16
+        sri             v7.4s,  v6.4s,  #16
+        st1             {v5.s}[0],      [x8], x12
+        st1             {v5.s}[1],      [x8], x12
+        st1             {v5.s}[2],      [x8], x12
+        st1             {v5.s}[3],      [x8], x12
+        st1             {v7.s}[0],      [x8], x12
+        st1             {v7.s}[1],      [x8], x12
+        st1             {v7.s}[2],      [x8], x12
+        st1             {v7.s}[3],      [x8], x12
+8:
+        subs            w3,  w3,  #2             // 2 channels done; at most 1 left
+        add             x0,  x0,  #4             // advance dst past the pair just written within each frame
+        b.eq            end
+
+4:      // 1 channel (last remaining plane)
+        ldr             x4,  [x1]
+        tst             w2,  #8                  // odd 8-frame chunk present?
+        mov             w9,  w2
+        mov             x5,  x0
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        b.ne            8f
+6:
+        subs            w9,  w9,  #16            // main loop: 16 frames per iteration
+        ld1             {v2.4s},        [x4], #16
+        fcvtzs          v2.4s,  v2.4s,  #31
+        ld1             {v3.4s},        [x4], #16
+        fcvtzs          v3.4s,  v3.4s,  #31
+        st1             {v0.h}[1],      [x5], x12 // store the high 16-bit half of each Q31 lane = truncated s16
+        st1             {v0.h}[3],      [x5], x12
+        st1             {v0.h}[5],      [x5], x12
+        st1             {v0.h}[7],      [x5], x12
+        st1             {v1.h}[1],      [x5], x12
+        st1             {v1.h}[3],      [x5], x12
+        st1             {v1.h}[5],      [x5], x12
+        st1             {v1.h}[7],      [x5], x12
+        b.eq            7f
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+7:
+        st1             {v2.h}[1],      [x5], x12
+        st1             {v2.h}[3],      [x5], x12
+        st1             {v2.h}[5],      [x5], x12
+        st1             {v2.h}[7],      [x5], x12
+        st1             {v3.h}[1],      [x5], x12
+        st1             {v3.h}[3],      [x5], x12
+        st1             {v3.h}[5],      [x5], x12
+        st1             {v3.h}[7],      [x5], x12
+        b.gt            6b
+        ret
+8:      // consume the odd 8-frame chunk first, then fall into the 16-frame loop
+        subs            w9,  w9,  #8
+        st1             {v0.h}[1],      [x5], x12
+        st1             {v0.h}[3],      [x5], x12
+        st1             {v0.h}[5],      [x5], x12
+        st1             {v0.h}[7],      [x5], x12
+        st1             {v1.h}[1],      [x5], x12
+        st1             {v1.h}[3],      [x5], x12
+        st1             {v1.h}[5],      [x5], x12
+        st1             {v1.h}[7],      [x5], x12
+        b.eq            end
+        ld1             {v0.4s},        [x4], #16
+        fcvtzs          v0.4s,  v0.4s,  #31
+        ld1             {v1.4s},        [x4], #16
+        fcvtzs          v1.4s,  v1.4s,  #31
+        b               6b
+end:    // common epilogue
+        ret
+endfunc
diff --git a/libswresample/aarch64/neontest.c b/libswresample/aarch64/neontest.c
new file mode 100644 (file)
index 0000000..85c71bf
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * check NEON registers for clobbers
+ * Copyright (c) 2013 Martin Storsjo
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libswresample/swresample.h"
+#include "libavutil/aarch64/neontest.h"
+
+wrap(swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
+                 const uint8_t **in , int in_count)) /* macro-generated override of swr_convert when CONFIG_NEON_CLOBBER_TEST is set */
+{
+    testneonclobbers(swr_convert, s, out, out_count, in, in_count); /* runs the real swr_convert and checks NEON register clobbering — helpers from libavutil/aarch64/neontest.h */
+}
index 325bdf4..efdc9b5 100644 (file)
@@ -156,6 +156,7 @@ AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt,
 
     if(HAVE_YASM && HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);
     if(ARCH_ARM)              swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);
+    if(ARCH_AARCH64)          swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);
 
     return ctx;
 }
index 792768a..3761843 100644 (file)
@@ -188,6 +188,10 @@ void swri_rematrix_init_x86(struct SwrContext *s);
 void swri_get_dither(SwrContext *s, void *dst, int len, unsigned seed, enum AVSampleFormat noise_fmt);
 int swri_dither_init(SwrContext *s, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt);
 
+void swri_audio_convert_init_aarch64(struct AudioConvert *ac,
+                                 enum AVSampleFormat out_fmt,
+                                 enum AVSampleFormat in_fmt,
+                                 int channels);
 void swri_audio_convert_init_arm(struct AudioConvert *ac,
                                  enum AVSampleFormat out_fmt,
                                  enum AVSampleFormat in_fmt,