Merge commit '10f4511f14a4e830c0ed471df4cd1cc2a18a481a'
authorJames Almer <jamrial@gmail.com>
Thu, 26 Oct 2017 17:06:34 +0000 (14:06 -0300)
committerJames Almer <jamrial@gmail.com>
Thu, 26 Oct 2017 17:06:34 +0000 (14:06 -0300)
* commit '10f4511f14a4e830c0ed471df4cd1cc2a18a481a':
  libavutil: Make LOCAL_ALIGNED(xx be equal to LOCAL_ALIGNED_xx(

Also added LOCAL_ALIGNED_4 as it's used in the vp8 decoder, and
simplified the configure defines.

Merged-by: James Almer <jamrial@gmail.com>
1  2 
configure
libavutil/internal.h

diff --cc configure
+++ b/configure
@@@ -1881,9 -1517,9 +1881,7 @@@ ARCH_FEATURES=
      fast_64bit
      fast_clz
      fast_cmov
--    local_aligned_8
--    local_aligned_16
--    local_aligned_32
++    local_aligned
      simd_align_16
      simd_align_32
  "
@@@ -4661,98 -3704,6 +4659,98 @@@ elif enabled mips; the
  
      cpuflags="-march=$cpu"
  
-                 enable local_aligned_8 local_aligned_16 local_aligned_32
 +    if [ "$cpu" != "generic" ]; then
 +        disable mips32r2
 +        disable mips32r5
 +        disable mips64r2
 +        disable mips32r6
 +        disable mips64r6
 +        disable loongson2
 +        disable loongson3
 +
 +        case $cpu in
 +            24kc|24kf*|24kec|34kc|1004kc|24kef*|34kf*|1004kf*|74kc|74kf)
 +                enable mips32r2
 +                disable msa
 +            ;;
 +            p5600|i6400|p6600)
 +                disable mipsdsp
 +                disable mipsdspr2
 +            ;;
 +            loongson*)
 +                enable loongson2
 +                enable loongson3
++                enable local_aligned
 +                enable simd_align_16
 +                enable fast_64bit
 +                enable fast_clz
 +                enable fast_cmov
 +                enable fast_unaligned
 +                disable aligned_stack
 +                disable mipsfpu
 +                disable mipsdsp
 +                disable mipsdspr2
 +                case $cpu in
 +                    loongson3*)
 +                        cpuflags="-march=loongson3a -mhard-float -fno-expensive-optimizations"
 +                    ;;
 +                    loongson2e)
 +                        cpuflags="-march=loongson2e -mhard-float -fno-expensive-optimizations"
 +                    ;;
 +                    loongson2f)
 +                        cpuflags="-march=loongson2f -mhard-float -fno-expensive-optimizations"
 +                    ;;
 +                esac
 +            ;;
 +            *)
 +                # Unknown CPU. Disable everything.
 +                warn "unknown CPU. Disabling all MIPS optimizations."
 +                disable mipsfpu
 +                disable mipsdsp
 +                disable mipsdspr2
 +                disable msa
 +                disable mmi
 +            ;;
 +        esac
 +
 +        case $cpu in
 +            24kc)
 +                disable mipsfpu
 +                disable mipsdsp
 +                disable mipsdspr2
 +            ;;
 +            24kf*)
 +                disable mipsdsp
 +                disable mipsdspr2
 +            ;;
 +            24kec|34kc|1004kc)
 +                disable mipsfpu
 +                disable mipsdspr2
 +            ;;
 +            24kef*|34kf*|1004kf*)
 +                disable mipsdspr2
 +            ;;
 +            74kc)
 +                disable mipsfpu
 +            ;;
 +            p5600)
 +                enable mips32r5
 +                check_cflags "-mtune=p5600" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops"
 +            ;;
 +            i6400)
 +                enable mips64r6
 +                check_cflags "-mtune=i6400 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
 +            ;;
 +            p6600)
 +                enable mips64r6
 +                check_cflags "-mtune=p6600 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
 +            ;;
 +        esac
 +    else
 +        # We do not disable anything. Is up to the user to disable the unwanted features.
 +        warn 'generic cpu selected'
 +    fi
 +
  elif enabled ppc; then
  
      disable ldbrx
@@@ -5574,7 -4413,7 +5572,7 @@@ elif enabled parisc; the
  
  elif enabled ppc; then
  
--    enable local_aligned_8 local_aligned_16 local_aligned_32
++    enable local_aligned
  
      check_inline_asm dcbzl     '"dcbzl 0, %0" :: "r"(0)'
      check_inline_asm ibm_asm   '"add 0, 0, 0"'
@@@ -5615,7 -4454,7 +5613,7 @@@ elif enabled x86; the
      check_builtin rdtsc    intrin.h   "__rdtsc()"
      check_builtin mm_empty mmintrin.h "_mm_empty()"
  
--    enable local_aligned_8 local_aligned_16 local_aligned_32
++    enable local_aligned
  
      # check whether EBP is available on x86
      # As 'i' is stored on the stack, this program will crash
  #include <assert.h>
  #include "config.h"
  #include "attributes.h"
 +#include "timer.h"
 +#include "cpu.h"
  #include "dict.h"
  #include "macros.h"
+ #include "mem.h"
  #include "pixfmt.h"
 +#include "version.h"
  
  #if ARCH_X86
  #   include "x86/emms.h"
      DECLARE_ALIGNED(a, t, la_##v) s o;                  \
      t (*v) o = la_##v
  
- #define LOCAL_ALIGNED(a, t, v, ...) E1(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
+ #define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
  
--#if HAVE_LOCAL_ALIGNED_8
++#if HAVE_LOCAL_ALIGNED
++#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
++#else
++#   define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
++#endif
++
++#if HAVE_LOCAL_ALIGNED
  #   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
  #else
- #   define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
+ #   define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
  #endif
  
--#if HAVE_LOCAL_ALIGNED_16
++#if HAVE_LOCAL_ALIGNED
  #   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
  #else
- #   define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
+ #   define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
  #endif
  
--#if HAVE_LOCAL_ALIGNED_32
++#if HAVE_LOCAL_ALIGNED
  #   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
  #else
- #   define LOCAL_ALIGNED_32(t, v, ...) LOCAL_ALIGNED(32, t, v, __VA_ARGS__)
+ #   define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
  #endif
  
  #define FF_ALLOC_OR_GOTO(ctx, p, size, label)\