X-Git-Url: http://git.ffmpeg.org/gitweb/ffmpeg.git/blobdiff_plain/35690321a72348b8c45bdaa1618d98ad7e628b80..c51695dbf6e05b397ad8ef8e89d27723db5cb9f1:/libavcodec/arm/mathops.h

diff --git a/libavcodec/arm/mathops.h b/libavcodec/arm/mathops.h
index 2da9c1c..7c2acca 100644
--- a/libavcodec/arm/mathops.h
+++ b/libavcodec/arm/mathops.h
@@ -2,20 +2,20 @@
  * simple math operations
  * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
  *
- * This file is part of FFmpeg.
+ * This file is part of Libav.
  *
- * FFmpeg is free software; you can redistribute it and/or
+ * Libav is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
- * FFmpeg is distributed in the hope that it will be useful,
+ * Libav is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
+ * License along with Libav; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
@@ -23,6 +23,7 @@
 #define AVCODEC_ARM_MATHOPS_H
 
 #include <stdint.h>
+#include "config.h"
 #include "libavutil/common.h"
 
 #if HAVE_INLINE_ASM
@@ -40,6 +41,8 @@ static inline av_const int MULL(int a, int b, unsigned shift)
 }
 
 #define MULH MULH
+#define MUL64 MUL64
+
 #if HAVE_ARMV6
 static inline av_const int MULH(int a, int b)
 {
@@ -47,6 +50,13 @@ static inline av_const int MULH(int a, int b)
     __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
     return r;
 }
+
+static inline av_const int64_t MUL64(int a, int b)
+{
+    int64_t x;
+    __asm__ ("smull %Q0, %R0, %1, %2" : "=r"(x) : "r"(a), "r"(b));
+    return x;
+}
 #else
 static inline av_const int MULH(int a, int b)
 {
@@ -54,23 +64,19 @@ static inline av_const int MULH(int a, int b)
     __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
     return hi;
 }
-#endif
 
 static inline av_const int64_t MUL64(int a, int b)
 {
-    union { uint64_t x; unsigned hl[2]; } x;
-    __asm__ ("smull %0, %1, %2, %3"
-             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
-    return x.x;
+    int64_t x;
+    __asm__ ("smull %Q0, %R0, %1, %2" : "=&r"(x) : "r"(a), "r"(b));
+    return x;
 }
-#define MUL64 MUL64
+#endif
 
 static inline av_const int64_t MAC64(int64_t d, int a, int b)
 {
-    union { uint64_t x; unsigned hl[2]; } x = { d };
-    __asm__ ("smlal %0, %1, %2, %3"
-             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
-    return x.x;
+    __asm__ ("smlal %Q0, %R0, %1, %2" : "+r"(d) : "r"(a), "r"(b));
+    return d;
 }
 #define MAC64(d, a, b) ((d) = MAC64(d, a, b))
 #define MLS64(d, a, b) MAC64(d, -(a), b)
@@ -96,7 +102,7 @@ static inline av_const int MUL16(int ra, int rb)
 static inline av_const int mid_pred(int a, int b, int c)
 {
     int m;
-    __asm__ volatile (
+    __asm__ (
         "mov   %0, %2  \n\t"
         "cmp   %1, %2  \n\t"
         "movgt %0, %1  \n\t"
@@ -106,7 +112,8 @@ static inline av_const int mid_pred(int a, int b, int c)
         "cmp   %0, %1  \n\t"
         "movgt %0, %1  \n\t"
         : "=&r"(m), "+r"(a)
-        : "r"(b), "r"(c));
+        : "r"(b), "r"(c)
+        : "cc");
     return m;
 }
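
For context on what the patch touches (this note and the sketch below are not part of the diff): MUL64 is the full 32x32->64 signed multiply, MAC64/MLS64 are the matching multiply-accumulate and multiply-subtract on a 64-bit accumulator, and mid_pred returns the median of three ints. The %Q0/%R0 operand modifiers simply name the low and high words of a 64-bit register pair, which is what lets the hand-split unions be replaced by a plain int64_t. The plain-C sketch below restates those expected semantics for reference only; the ref_* helper names and the test values are hypothetical, chosen purely for illustration.

/* Portable reference sketch (not part of the patch): what the ARM
 * inline-asm helpers above are expected to compute.  The ref_* names
 * are hypothetical, for illustration only. */
#include <stdint.h>
#include <stdio.h>

/* MUL64: full 64-bit product of two signed 32-bit values (smull). */
static int64_t ref_mul64(int a, int b)
{
    return (int64_t)a * b;
}

/* MAC64: d += a * b with a 64-bit accumulator (smlal). */
static int64_t ref_mac64(int64_t d, int a, int b)
{
    return d + (int64_t)a * b;
}

/* MLS64: d -= a * b, defined in the header as MAC64(d, -(a), b). */
static int64_t ref_mls64(int64_t d, int a, int b)
{
    return d - (int64_t)a * b;
}

/* mid_pred: median of three ints, the value the conditional-move
 * sequence in the diff leaves in %0. */
static int ref_mid_pred(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* now a <= b              */
    if (b > c) b = c;                        /* b = min(max(a0,b0), c)  */
    return a > b ? a : b;                    /* median of the three     */
}

int main(void)
{
    printf("%lld\n", (long long)ref_mul64(-3, 1 << 20));  /* -3145728 */
    printf("%lld\n", (long long)ref_mac64(10, 4, 5));     /* 30 */
    printf("%lld\n", (long long)ref_mls64(10, 4, 5));     /* -10 */
    printf("%d\n", ref_mid_pred(3, 1, 2));                /* 2 */
    return 0;
}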