Merge commit 'f8bbebecfd7ea3dceb7c96f931beca33f80a3490'
author    Michael Niedermayer <michaelni@gmx.at>
Fri, 14 Mar 2014 00:20:43 +0000 (01:20 +0100)
committer Michael Niedermayer <michaelni@gmx.at>
Fri, 14 Mar 2014 00:20:43 +0000 (01:20 +0100)
* commit 'f8bbebecfd7ea3dceb7c96f931beca33f80a3490':
  x86: motion_est: K&R formatting cosmetics

Conflicts:
libavcodec/x86/motion_est.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
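
The upstream commit is purely cosmetic (K&R reformatting), so the conflict in
libavcodec/x86/motion_est.c is resolved by taking the new layout while keeping
FFmpeg's own semantics: the (x86_reg) cast in the "len" computation and
av_assert2() instead of assert(). The sketch below is not part of the patch
(x86_reg is approximated as intptr_t here); it only illustrates why the cast
form is kept: the negation and multiplication happen at register width, so the
offset handed to the inline asm is never the product of narrower int
arithmetic.

    /* Minimal sketch, not FFmpeg code: x86_reg is approximated as intptr_t
     * (the real typedef in libavutil/x86/asm.h picks int64_t or int32_t per
     * target).  With the cast, the expression parses as (-(x86_reg)stride) * h,
     * so the whole product is formed at register width. */
    #include <assert.h>
    #include <stdint.h>

    typedef intptr_t x86_reg;

    static x86_reg neg_len_cast(int stride, int h)
    {
        return -(x86_reg)stride * h;    /* register-width multiply */
    }

    static x86_reg neg_len_nocast(int stride, int h)
    {
        return -(stride * h);           /* int-width multiply, then widened */
    }

    int main(void)
    {
        /* For ordinary video strides the two forms agree ... */
        assert(neg_len_cast(720, 16) == neg_len_nocast(720, 16));
        /* ... the cast form simply guarantees the multiply itself can never
         * wrap in 32-bit int arithmetic before the result is used as a
         * pointer offset inside the asm blocks. */
        return 0;
    }
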
diff --cc libavcodec/x86/motion_est.c

@@@ -41,8 -41,8 +41,8 @@@ DECLARE_ASM_CONST(8, uint64_t, bone) = 
  
  static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
  {
-     x86_reg len= -(x86_reg)stride*h;
-     __asm__ volatile(
 -    x86_reg len = -(stride * h);
++    x86_reg len = -(x86_reg)stride * h;
+     __asm__ volatile (
          ".p2align 4                     \n\t"
          "1:                             \n\t"
          "movq (%1, %%"REG_a"), %%mm0    \n\t"
@@@ -198,14 -193,14 +193,14 @@@ static inline void sad8_4_mmxext(uint8_
          "sub $2, %0                     \n\t"
          " jg 1b                         \n\t"
          : "+r" (h), "+r" (blk1), "+r" (blk2)
-         : "r" ((x86_reg)stride)
-     );
+         : "r" ((x86_reg) stride));
  }
  
- static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
+ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
+                               int stride, int h)
  {
-     x86_reg len= -(x86_reg)stride*h;
-     __asm__ volatile(
 -    x86_reg len = -(stride * h);
++    x86_reg len = -(x86_reg)stride * h;
+     __asm__ volatile (
          ".p2align 4                     \n\t"
          "1:                             \n\t"
          "movq (%1, %%"REG_a"), %%mm0    \n\t"
  
  static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
  {
-     x86_reg len= -(x86_reg)stride*h;
-     __asm__ volatile(
-         "movq (%1, %%"REG_a"), %%mm0    \n\t"
 -    x86_reg len = -(stride * h);
++    x86_reg len = -(x86_reg)stride * h;
+     __asm__ volatile (
+         "movq  (%1, %%"REG_a"), %%mm0   \n\t"
          "movq 1(%1, %%"REG_a"), %%mm2   \n\t"
          "movq %%mm0, %%mm1              \n\t"
          "movq %%mm2, %%mm3              \n\t"
@@@ -321,112 -314,130 +314,130 @@@ static inline int sum_mmxext(void
  
  static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
  {
-     sad8_2_mmx(blk1, blk1+1, blk2, stride, h);
+     sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
  }
  static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
  {
-     sad8_2_mmx(blk1, blk1+stride, blk2, stride, h);
+     sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
  }
  
- #define PIX_SAD(suf)\
- static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     av_assert2(h==8);\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t":);\
- \
-     sad8_1_ ## suf(blk1, blk2, stride, 8);\
- \
-     return sum_ ## suf();\
- }\
- static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     av_assert2(h==8);\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  "movq %0, %%mm5        \n\t"\
-                  :: "m"(round_tab[1]) \
-                  );\
- \
-     sad8_x2a_ ## suf(blk1, blk2, stride, 8);\
- \
-     return sum_ ## suf();\
- }\
- \
- static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     av_assert2(h==8);\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  "movq %0, %%mm5        \n\t"\
-                  :: "m"(round_tab[1]) \
-                  );\
- \
-     sad8_y2a_ ## suf(blk1, blk2, stride, 8);\
- \
-     return sum_ ## suf();\
- }\
- \
- static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     av_assert2(h==8);\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  ::);\
- \
-     sad8_4_ ## suf(blk1, blk2, stride, 8);\
- \
-     return sum_ ## suf();\
- }\
- \
- static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t":);\
- \
-     sad8_1_ ## suf(blk1  , blk2  , stride, h);\
-     sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
- \
-     return sum_ ## suf();\
- }\
- static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  "movq %0, %%mm5        \n\t"\
-                  :: "m"(round_tab[1]) \
-                  );\
- \
-     sad8_x2a_ ## suf(blk1  , blk2  , stride, h);\
-     sad8_x2a_ ## suf(blk1+8, blk2+8, stride, h);\
- \
-     return sum_ ## suf();\
- }\
- static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  "movq %0, %%mm5        \n\t"\
-                  :: "m"(round_tab[1]) \
-                  );\
- \
-     sad8_y2a_ ## suf(blk1  , blk2  , stride, h);\
-     sad8_y2a_ ## suf(blk1+8, blk2+8, stride, h);\
- \
-     return sum_ ## suf();\
- }\
- static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
- {\
-     __asm__ volatile("pxor %%mm7, %%mm7     \n\t"\
-                  "pxor %%mm6, %%mm6     \n\t"\
-                  ::);\
- \
-     sad8_4_ ## suf(blk1  , blk2  , stride, h);\
-     sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
- \
-     return sum_ ## suf();\
- }\
+ #define PIX_SAD(suf)                                                    \
+ static int sad8_ ## suf(void *v, uint8_t *blk2,                         \
+                         uint8_t *blk1, int stride, int h)               \
+ {                                                                       \
 -    assert(h == 8);                                                     \
++    av_assert2(h == 8);                                                 \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         :);                                                             \
+                                                                         \
+     sad8_1_ ## suf(blk1, blk2, stride, 8);                              \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad8_x2_ ## suf(void *v, uint8_t *blk2,                      \
+                            uint8_t *blk1, int stride, int h)            \
+ {                                                                       \
 -    assert(h == 8);                                                     \
++    av_assert2(h == 8);                                                 \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         "movq %0, %%mm5        \n\t"                                    \
+         :: "m" (round_tab[1]));                                         \
+                                                                         \
+     sad8_x2a_ ## suf(blk1, blk2, stride, 8);                            \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad8_y2_ ## suf(void *v, uint8_t *blk2,                      \
+                            uint8_t *blk1, int stride, int h)            \
+ {                                                                       \
 -    assert(h == 8);                                                     \
++    av_assert2(h == 8);                                                 \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         "movq %0, %%mm5        \n\t"                                    \
+         :: "m" (round_tab[1]));                                         \
+                                                                         \
+     sad8_y2a_ ## suf(blk1, blk2, stride, 8);                            \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad8_xy2_ ## suf(void *v, uint8_t *blk2,                     \
+                             uint8_t *blk1, int stride, int h)           \
+ {                                                                       \
 -    assert(h == 8);                                                     \
++    av_assert2(h == 8);                                                 \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         ::);                                                            \
+                                                                         \
+     sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad16_ ## suf(void *v, uint8_t *blk2,                        \
+                          uint8_t *blk1, int stride, int h)              \
+ {                                                                       \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         :);                                                             \
+                                                                         \
+     sad8_1_ ## suf(blk1,     blk2,     stride, h);                      \
+     sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad16_x2_ ## suf(void *v, uint8_t *blk2,                     \
+                             uint8_t *blk1, int stride, int h)           \
+ {                                                                       \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         "movq %0, %%mm5        \n\t"                                    \
+         :: "m" (round_tab[1]));                                         \
+                                                                         \
+     sad8_x2a_ ## suf(blk1,     blk2,     stride, h);                    \
+     sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad16_y2_ ## suf(void *v, uint8_t *blk2,                     \
+                             uint8_t *blk1, int stride, int h)           \
+ {                                                                       \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         "movq %0, %%mm5        \n\t"                                    \
+         :: "m" (round_tab[1]));                                         \
+                                                                         \
+     sad8_y2a_ ## suf(blk1,     blk2,     stride, h);                    \
+     sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h);                    \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
+                                                                         \
+ static int sad16_xy2_ ## suf(void *v, uint8_t *blk2,                    \
+                              uint8_t *blk1, int stride, int h)          \
+ {                                                                       \
+     __asm__ volatile (                                                  \
+         "pxor %%mm7, %%mm7     \n\t"                                    \
+         "pxor %%mm6, %%mm6     \n\t"                                    \
+         ::);                                                            \
+                                                                         \
+     sad8_4_ ## suf(blk1,     blk2,     stride, h);                      \
+     sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h);                      \
+                                                                         \
+     return sum_ ## suf();                                               \
+ }                                                                       \
  
  PIX_SAD(mmx)
  PIX_SAD(mmxext)
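
The PIX_SAD(suf) body above is one macro that stamps out the whole family of
8x8 and 16x16 SAD entry points per instruction-set suffix via token pasting.
A reduced, runnable sketch of that pattern (not FFmpeg code; the inline-asm
kernels are replaced by a plain C helper) follows:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* plain C stand-in for the asm sad8_1_* kernels */
    static int sad8_c(const uint8_t *a, const uint8_t *b, int stride, int h)
    {
        int sum = 0;
        for (int y = 0; y < h; y++)
            for (int x = 0; x < 8; x++)
                sum += abs(a[y * stride + x] - b[y * stride + x]);
        return sum;
    }

    /* one body, many names: the suffix is pasted into the function name */
    #define PIX_SAD_SKETCH(suf)                                             \
    static int sad16_ ## suf(const uint8_t *blk2, const uint8_t *blk1,      \
                             int stride, int h)                             \
    {                                                                       \
        return sad8_c(blk1,     blk2,     stride, h) +                      \
               sad8_c(blk1 + 8, blk2 + 8, stride, h);                       \
    }

    PIX_SAD_SKETCH(mmx)     /* defines sad16_mmx    */
    PIX_SAD_SKETCH(mmxext)  /* defines sad16_mmxext */

    int main(void)
    {
        uint8_t a[16 * 16] = { 0 }, b[16 * 16] = { 0 };
        b[0] = 10;
        printf("%d %d\n", sad16_mmx(a, b, 16, 16),
                          sad16_mmxext(a, b, 16, 16));
        return 0;
    }
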
@@@ -467,8 -478,8 +478,8 @@@ av_cold void ff_dsputil_init_pix_mmx(DS
              c->pix_abs[1][3] = sad8_xy2_mmxext;
          }
      }
 -    if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW)) {
 +    if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
-         c->sad[0]= sad16_sse2;
+         c->sad[0] = sad16_sse2;
      }
  #endif /* HAVE_INLINE_ASM */
  }
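
The final hunk touches the usual init-time dispatch: ff_dsputil_init_pix_mmx()
fills DSPContext function pointers from the detected CPU flags, and the FFmpeg
side additionally keeps the SSE2 16x16 SAD away from the Snow codec. A reduced,
runnable sketch of that selection pattern (hypothetical names, not FFmpeg's
DSPContext or CPU-flag API; the real condition also excludes 3DNow-flagged
CPUs):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef int (*sad_fn)(const uint8_t *blk2, const uint8_t *blk1,
                          int stride, int h);

    struct sad_ctx {                 /* stand-in for DSPContext        */
        sad_fn sad16;                /* c->sad[0] in the real code     */
    };

    enum { FLAG_MMX = 1, FLAG_SSE2 = 2 };  /* hypothetical flag bits   */

    static int sad16_c(const uint8_t *a, const uint8_t *b, int stride, int h)
    {
        int sum = 0;
        for (int y = 0; y < h; y++)
            for (int x = 0; x < 16; x++)
                sum += abs(a[y * stride + x] - b[y * stride + x]);
        return sum;
    }

    /* stand-ins for the asm versions; same signature, reference result */
    static int sad16_mmx_stub(const uint8_t *a, const uint8_t *b,
                              int stride, int h)
    {
        return sad16_c(a, b, stride, h);
    }

    static int sad16_sse2_stub(const uint8_t *a, const uint8_t *b,
                               int stride, int h)
    {
        return sad16_c(a, b, stride, h);
    }

    static void init_sad(struct sad_ctx *c, int cpu_flags, int codec_is_snow)
    {
        c->sad16 = sad16_c;
        if (cpu_flags & FLAG_MMX)
            c->sad16 = sad16_mmx_stub;
        if ((cpu_flags & FLAG_SSE2) && !codec_is_snow) /* mirrors the merge */
            c->sad16 = sad16_sse2_stub;
    }

    int main(void)
    {
        uint8_t a[16 * 16] = { 0 }, b[16 * 16] = { 0 };
        struct sad_ctx c;

        b[5] = 200;
        init_sad(&c, FLAG_MMX | FLAG_SSE2, /*codec_is_snow=*/1);
        printf("sad16 = %d\n", c.sad16(a, b, 16, 16)); /* MMX stub picked */
        return 0;
    }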