Merge commit '2ddb35b91131115c094d90e04031451023441b4d'
author    Michael Niedermayer <michaelni@gmx.at>  Sun, 6 Oct 2013 09:49:22 +0000 (11:49 +0200)
committer Michael Niedermayer <michaelni@gmx.at>  Sun, 6 Oct 2013 09:50:01 +0000 (11:50 +0200)
* commit '2ddb35b91131115c094d90e04031451023441b4d':
  x86: dsputil: Separate ff_add_hfyu_median_prediction_cmov from dsputil_mmx

Merged-by: Michael Niedermayer <michaelni@gmx.at>
libavcodec/x86/Makefile
libavcodec/x86/dsputil_mmx.c
libavcodec/x86/dsputil_x86.c

libavcodec/x86/Makefile:      Simple merge
libavcodec/x86/dsputil_mmx.c: Simple merge
diff --cc libavcodec/x86/dsputil_x86.c
index 0000000,144339b..f43b9d7
mode 000000,100644..100644
--- /dev/null
+++ b/libavcodec/x86/dsputil_x86.c
@@@ -1,0 -1,65 +1,65 @@@
+ /*
+  * Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
+  *
 - * This file is part of Libav.
++ * This file is part of FFmpeg.
+  *
 - * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+
+ #include "config.h"
+ #include "libavutil/x86/asm.h"
+ #include "dsputil_x86.h"
+
+ #if HAVE_INLINE_ASM
+
+ #if HAVE_7REGS
+ void ff_add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top,
+                                         const uint8_t *diff, int w,
+                                         int *left, int *left_top)
+ {
+     x86_reg w2 = -w;
+     x86_reg x;
+     int l  = *left     & 0xff;
+     int tl = *left_top & 0xff;
+     int t;
+     __asm__ volatile (
+         "mov          %7, %3            \n"
+         "1:                             \n"
+         "movzbl (%3, %4), %2            \n"
+         "mov          %2, %k3           \n"
+         "sub         %b1, %b3           \n"
+         "add         %b0, %b3           \n"
+         "mov          %2, %1            \n"
+         "cmp          %0, %2            \n"
+         "cmovg        %0, %2            \n"
+         "cmovg        %1, %0            \n"
+         "cmp         %k3, %0            \n"
+         "cmovg       %k3, %0            \n"
+         "mov          %7, %3            \n"
+         "cmp          %2, %0            \n"
+         "cmovl        %2, %0            \n"
+         "add    (%6, %4), %b0           \n"
+         "mov         %b0, (%5, %4)      \n"
+         "inc          %4                \n"
+         "jl           1b                \n"
+         : "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
+         : "r"(dst + w), "r"(diff + w), "rm"(top + w)
+     );
+     *left     = l;
+     *left_top = tl;
+ }
+ #endif
+
+ #endif /* HAVE_INLINE_ASM */
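
For readers working through the inline asm above: the cmov loop computes the standard HuffYUV median prediction, pred = median(left, top, left + top - topleft), and reconstructs each output byte as pred + diff[i]. A minimal plain-C sketch of that behaviour follows; it mirrors the generic C path in dsputil and is illustrative only (mid_pred3 and add_hfyu_median_prediction_ref are made-up names, not code added by this commit):

    #include <stdint.h>

    /* Median of three values, as used by the HuffYUV left/top predictor. */
    static int mid_pred3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* now a <= b          */
        if (b > c)  b = c;                        /* b = min(b, c)       */
        return a > b ? a : b;                     /* max(a, b) = median  */
    }

    /* Plain-C equivalent of what the cmov loop computes: reconstruct w
     * pixels from the residual 'diff' using the median of left, top and
     * (left + top - topleft), updating *left / *left_top for the caller. */
    static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                               const uint8_t *diff, int w,
                                               int *left, int *left_top)
    {
        int l  = *left     & 0xff;
        int tl = *left_top & 0xff;

        for (int i = 0; i < w; i++) {
            int t    = top[i];
            int pred = mid_pred3(l, t, (l + t - tl) & 0xff);
            l        = (pred + diff[i]) & 0xff;
            tl       = t;
            dst[i]   = l;
        }

        *left     = l;
        *left_top = tl;
    }

The asm version keeps the running left, top-left and top values plus the loop counter in registers and replaces the branches of the median selection with cmov instructions, which is why it is guarded by HAVE_7REGS in addition to HAVE_INLINE_ASM.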