adapting existing mmx/mmx2/sse/3dnow optimizations so they work on x86_64
author    aurel <aurel@b3059339-0415-0410-9bf9-f77b7e298cf2>
Thu, 21 Oct 2004 11:55:20 +0000 (11:55 +0000)
committer aurel <aurel@b3059339-0415-0410-9bf9-f77b7e298cf2>
Thu, 21 Oct 2004 11:55:20 +0000 (11:55 +0000)
git-svn-id: svn://git.mplayerhq.hu/mplayer/trunk@13721 b3059339-0415-0410-9bf9-f77b7e298cf2

27 files changed:
bswap.h
configure
cpudetect.c
cpudetect.h
libmpcodecs/pullup.c
libmpcodecs/vf_decimate.c
libmpcodecs/vf_divtc.c
libmpcodecs/vf_eq.c
libmpcodecs/vf_eq2.c
libmpcodecs/vf_filmdint.c
libmpcodecs/vf_halfpack.c
libmpcodecs/vf_ilpack.c
libmpcodecs/vf_ivtc.c
libmpcodecs/vf_noise.c
libmpcodecs/vf_spp.c
libmpcodecs/vf_tfields.c
libvo/aclib.c
libvo/aclib_template.c
libvo/osd.c
libvo/osd_template.c
postproc/rgb2rgb.c
postproc/rgb2rgb_template.c
postproc/swscale-example.c
postproc/swscale.c
postproc/swscale_template.c
postproc/yuv2rgb.c
postproc/yuv2rgb_template.c

diff --git a/bswap.h b/bswap.h
index 864bedf..42cd640 100644 (file)
--- a/bswap.h
+++ b/bswap.h
@@ -7,17 +7,23 @@
 
 #include <inttypes.h>
 
-#ifdef ARCH_X86
-static inline unsigned short ByteSwap16(unsigned short x)
+#ifdef ARCH_X86_64
+#  define LEGACY_REGS "=Q"
+#else
+#  define LEGACY_REGS "=q"
+#endif
+
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
+static inline uint16_t ByteSwap16(uint16_t x)
 {
   __asm("xchgb %b0,%h0"        :
-        "=q" (x)       :
+        LEGACY_REGS (x)        :
         "0" (x));
     return x;
 }
 #define bswap_16(x) ByteSwap16(x)
 
-static inline unsigned int ByteSwap32(unsigned int x)
+static inline uint32_t ByteSwap32(uint32_t x)
 {
 #if __CPU__ > 386
  __asm("bswap  %0":
@@ -26,21 +32,28 @@ static inline unsigned int ByteSwap32(unsigned int x)
  __asm("xchgb  %b0,%h0\n"
       "        rorl    $16,%0\n"
       "        xchgb   %b0,%h0":
-      "=q" (x)         :
+      LEGACY_REGS (x)          :
 #endif
       "0" (x));
   return x;
 }
 #define bswap_32(x) ByteSwap32(x)
 
-static inline unsigned long long int ByteSwap64(unsigned long long int x)
+static inline uint64_t ByteSwap64(uint64_t x)
 {
+#ifdef ARCH_X86_64
+  __asm("bswap %0":
+        "=r" (x)     :
+        "0" (x));
+  return x;
+#else
   register union { __extension__ uint64_t __ll;
           uint32_t __l[2]; } __x;
   asm("xchgl   %0,%1":
       "=r"(__x.__l[0]),"=r"(__x.__l[1]):
       "0"(bswap_32((unsigned long)x)),"1"(bswap_32((unsigned long)(x>>32))));
   return __x.__ll;
+#endif
 }
 #define bswap_64(x) ByteSwap64(x)
 
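A note on the bswap.h hunk: the "xchgb %b0,%h0" trick needs a register whose high byte is addressable, i.e. one of a/b/c/d. The "q" constraint means exactly those on i386, but on x86_64 it allows any general-purpose register, so LEGACY_REGS switches to "Q" (still a/b/c/d) there; the 64-bit swap itself collapses into a single bswap on a 64-bit register. A minimal self-check of the widened uint16_t/uint32_t/uint64_t prototypes might look like this (hypothetical test, not part of the patch; assumes ARCH_X86 or ARCH_X86_64 is defined before bswap.h is included, as config.h normally arranges):

/* hypothetical sanity check for the new bswap.h paths */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include "bswap.h"

int main(void)
{
    assert(bswap_16(0x1234) == 0x3412);
    assert(bswap_32(0x12345678u) == 0x78563412u);
    /* on x86_64 this is now one 64-bit bswap instead of two xchgl'd halves */
    assert(bswap_64(0x0102030405060708ULL) == 0x0807060504030201ULL);
    printf("bswap_16/32/64 ok\n");
    return 0;
}
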
diff --git a/configure b/configure
index af59401..6ab921d 100755 (executable)
--- a/configure
+++ b/configure
@@ -456,7 +456,14 @@ if test -z "$_target" ; then
       case "`( uname -m ) 2>&1`" in
       i[3-9]86*|x86|x86pc|k5|k6|k6_2|k6_3|k6-2|k6-3|pentium*|athlon*|i586_i686|i586-i686|BePC) host_arch=i386 ;;
       ia64) host_arch=ia64 ;;
-      x86_64|amd64) host_arch=x86_64 ;;
+      x86_64|amd64)
+        if [ "`$_cc -dumpmachine | grep x86_64 | cut -d- -f1`" = "x86_64" -a \
+             -z "`echo $CFLAGS | grep -- -m32`"  ]; then
+          host_arch=x86_64
+        else
+          host_arch=i386
+        fi
+      ;;
       macppc|ppc) host_arch=ppc ;;
       alpha) host_arch=alpha ;;
       sparc) host_arch=sparc ;;
@@ -672,17 +679,8 @@ elif x86; then
   _cpuinfo="TOOLS/cpuinfo"
 fi
 
-case "$host_arch" in
-  i[3-9]86|x86|x86pc|k5|k6|k6-2|k6-3|pentium*|athlon*|i586-i686)
-  _def_arch="#define ARCH_X86 1"
-  _target_arch="TARGET_ARCH_X86 = yes"
-
-  pname=`$_cpuinfo | grep 'model name' | cut -d ':' -f 2 | head -1`
-  pvendor=`$_cpuinfo | grep 'vendor_id' | cut -d ':' -f 2  | cut -d ' ' -f 2 | head -1`
-  pfamily=`$_cpuinfo | grep 'cpu family' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-  pmodel=`$_cpuinfo | grep -v 'model name' | grep 'model' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-  pstepping=`$_cpuinfo | grep 'stepping' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
-
+x86_exts_check()
+{
   pparam=`$_cpuinfo | grep 'features' | cut -d ':' -f 2 | head -1`
   if test -z "$pparam" ; then
     pparam=`$_cpuinfo | grep 'flags' | cut -d ':' -f 2 | head -1`
@@ -707,6 +705,20 @@ case "$host_arch" in
     sse2)         _sse2=yes                ;;
     esac
   done
+}
+
+case "$host_arch" in
+  i[3-9]86|x86|x86pc|k5|k6|k6-2|k6-3|pentium*|athlon*|i586-i686)
+  _def_arch="#define ARCH_X86 1"
+  _target_arch="TARGET_ARCH_X86 = yes"
+
+  pname=`$_cpuinfo | grep 'model name' | cut -d ':' -f 2 | head -1`
+  pvendor=`$_cpuinfo | grep 'vendor_id' | cut -d ':' -f 2  | cut -d ' ' -f 2 | head -1`
+  pfamily=`$_cpuinfo | grep 'cpu family' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+  pmodel=`$_cpuinfo | grep -v 'model name' | grep 'model' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+  pstepping=`$_cpuinfo | grep 'stepping' | cut -d ':' -f 2 | cut -d ' ' -f 2 | head -1`
+
+  x86_exts_check
 
   echocheck "CPU vendor"
   echores "$pvendor ($pfamily:$pmodel:$pstepping)"
@@ -904,6 +916,7 @@ EOF
     _march=''
     _mcpu=''
     _optimizing=''
+    x86_exts_check
     ;;
 
   sparc)
diff --git a/cpudetect.c b/cpudetect.c
index 9e05236..65d7e5e 100644 (file)
--- a/cpudetect.c
+++ b/cpudetect.c
@@ -9,7 +9,7 @@ CpuCaps gCpuCaps;
 #endif
 #include <stdlib.h>
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 #include <stdio.h>
 #include <string.h>
@@ -47,25 +47,25 @@ static void check_os_katmai_support( void );
 // return TRUE if cpuid supported
 static int has_cpuid()
 {
-       int a, c;
+       long a, c;
 
 // code from libavcodec:
     __asm__ __volatile__ (
                           /* See if CPUID instruction is supported ... */
                           /* ... Get copies of EFLAGS into eax and ecx */
                           "pushf\n\t"
-                          "popl %0\n\t"
-                          "movl %0, %1\n\t"
+                          "pop %0\n\t"
+                          "mov %0, %1\n\t"
                           
                           /* ... Toggle the ID bit in one copy and store */
                           /*     to the EFLAGS reg */
-                          "xorl $0x200000, %0\n\t"
+                          "xor $0x200000, %0\n\t"
                           "push %0\n\t"
                           "popf\n\t"
                           
                           /* ... Get the (hopefully modified) EFLAGS */
                           "pushf\n\t"
-                          "popl %0\n\t"
+                          "pop %0\n\t"
                           : "=a" (a), "=c" (c)
                           :
                           : "cc" 
@@ -87,9 +87,9 @@ do_cpuid(unsigned int ax, unsigned int *p)
 #else
 // code from libavcodec:
     __asm __volatile
-       ("movl %%ebx, %%esi\n\t"
+       ("mov %%"REG_b", %%"REG_S"\n\t"
          "cpuid\n\t"
-         "xchgl %%ebx, %%esi"
+         "xchg %%"REG_b", %%"REG_S
          : "=a" (p[0]), "=S" (p[1]), 
            "=c" (p[2]), "=d" (p[3])
          : "0" (ax));
@@ -456,7 +456,7 @@ static void check_os_katmai_support( void )
    gCpuCaps.hasSSE=0;
 #endif /* __linux__ */
 }
-#else /* ARCH_X86 */
+#else /* ARCH_X86 || ARCH_X86_64 */
 
 #ifdef SYS_DARWIN
 #include <sys/sysctl.h>
@@ -536,10 +536,6 @@ void GetCpuCaps( CpuCaps *caps)
        mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Intel Itanium\n");
 #endif
 
-#ifdef ARCH_X86_64
-       mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Advanced Micro Devices 64-bit CPU\n");
-#endif
-
 #ifdef ARCH_SPARC
        mp_msg(MSGT_CPUDETECT,MSGL_INFO,"CPU: Sun Sparc\n");
 #endif
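
The cpudetect.c asm keeps libavcodec's habit of parking the cpuid-clobbered %ebx (the i386 PIC register) or %rbx in REG_S and swapping it back afterwards, so one do_cpuid() body assembles in both modes; has_cpuid() only needed long locals plus suffix-less push/pop so EFLAGS is handled at the native word size. A small standalone rendering of the same pattern (hypothetical wrapper; the real do_cpuid() is static inside cpudetect.c):

/* hypothetical standalone version of the PIC-safe cpuid pattern;
 * in the tree, REG_b/REG_S come from cpudetect.h */
#include <stdio.h>

#ifdef __x86_64__
#  define REG_b "rbx"
#  define REG_S "rsi"
#else
#  define REG_b "ebx"
#  define REG_S "esi"
#endif

static void cpuid(unsigned int ax, unsigned int p[4])
{
    __asm__ __volatile__(
        "mov %%"REG_b", %%"REG_S"\n\t"   /* save ebx/rbx */
        "cpuid\n\t"
        "xchg %%"REG_b", %%"REG_S        /* restore it; the ebx result ends up in %1 */
        : "=a" (p[0]), "=S" (p[1]), "=c" (p[2]), "=d" (p[3])
        : "0" (ax));
}

int main(void)
{
    unsigned int r[4];
    cpuid(1, r);   /* leaf 1: feature flags in EDX */
    printf("MMX:%d SSE:%d SSE2:%d\n",
           !!(r[3] & (1 << 23)), !!(r[3] & (1 << 25)), !!(r[3] & (1 << 26)));
    return 0;
}
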
diff --git a/cpudetect.h b/cpudetect.h
index c178eb7..997ec11 100644 (file)
--- a/cpudetect.h
+++ b/cpudetect.h
@@ -6,6 +6,32 @@
 #define CPUTYPE_I586   5
 #define CPUTYPE_I686    6
 
+#ifdef ARCH_X86_64
+#  define REGa    rax
+#  define REGb    rbx
+#  define REGSP   rsp
+#  define REG_a  "rax"
+#  define REG_b  "rbx"
+#  define REG_c  "rcx"
+#  define REG_d  "rdx"
+#  define REG_S  "rsi"
+#  define REG_D  "rdi"
+#  define REG_SP "rsp"
+#  define REG_BP "rbp"
+#else
+#  define REGa    eax
+#  define REGb    ebx
+#  define REGSP   esp
+#  define REG_a  "eax"
+#  define REG_b  "ebx"
+#  define REG_c  "ecx"
+#  define REG_d  "edx"
+#  define REG_S  "esi"
+#  define REG_D  "edi"
+#  define REG_SP "esp"
+#  define REG_BP "ebp"
+#endif
+
 typedef struct cpucaps_s {
        int cpuType;
        int cpuStepping;
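
These REG_* strings are the backbone of the whole commit: the inline-asm loops below splice them into the instruction templates and switch to suffix-less mnemonics (add/mov/lea/shl/...), so the operand size follows the register. The other half of the recipe is that any asm operand used as an address or index must be long (or cast to long); a plain int would hand the asm a 32-bit register on x86_64. A minimal sketch of the converted loop shape, assuming the macros above are in scope and an MMX-capable build (hypothetical function, not taken from the patch):

/* hypothetical example of the conversion pattern: symbolic register names
 * plus pointer-sized (long) operands, so the same asm builds on both archs */
static void copy8_strided(unsigned char *dst, const unsigned char *src,
                          int stride, int blocks)   /* blocks > 0 */
{
    long n = blocks;                      /* loop counter must be register-sized */
    __asm__ __volatile__(
        "1:                            \n\t"
        "movq (%%"REG_S"), %%mm0       \n\t"
        "add  %%"REG_a", %%"REG_S"     \n\t"  /* src += stride; 64-bit add on x86_64 */
        "movq %%mm0, (%%"REG_D")       \n\t"
        "add  $8, %%"REG_D"            \n\t"
        "dec  %2                       \n\t"
        "jnz  1b                       \n\t"
        "emms                          \n\t"
        : "+S" (src), "+D" (dst), "+r" (n)
        : "a" ((long)stride)                  /* widen the int before it hits a register */
        : "memory");
}

The same two moves (symbolic registers, long operands) account for nearly every hunk in the libmpcodecs/, libvo/ and postproc/ files below.
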
diff --git a/libmpcodecs/pullup.c b/libmpcodecs/pullup.c
index 2abaa15..3627084 100644 (file)
--- a/libmpcodecs/pullup.c
+++ b/libmpcodecs/pullup.c
@@ -8,6 +8,7 @@
 
 
 
+#ifdef ARCH_X86
 #ifdef HAVE_MMX
 static int diff_y_mmx(unsigned char *a, unsigned char *b, int s)
 {
@@ -147,6 +148,7 @@ static int licomb_y_mmx(unsigned char *a, unsigned char *b, int s)
        return ret;
 }
 #endif
+#endif
 
 #define ABS(a) (((a)^((a)>>31))-((a)>>31))
 
@@ -682,12 +684,14 @@ void pullup_init_context(struct pullup_context *c)
        case PULLUP_FMT_Y:
                c->diff = diff_y;
                c->comb = licomb_y;
+#ifdef ARCH_X86
 #ifdef HAVE_MMX
                if (c->cpu & PULLUP_CPU_MMX) {
                        c->diff = diff_y_mmx;
                        c->comb = licomb_y_mmx;
                }
 #endif
+#endif
                /* c->comb = qpcomb_y; */
                break;
 #if 0
diff --git a/libmpcodecs/vf_decimate.c b/libmpcodecs/vf_decimate.c
index 7cc8d2a..1a80e9d 100644 (file)
--- a/libmpcodecs/vf_decimate.c
+++ b/libmpcodecs/vf_decimate.c
@@ -31,11 +31,11 @@ static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns)
                ".balign 16 \n\t"
                "1: \n\t"
                
-               "movq (%%esi), %%mm0 \n\t"
-               "movq (%%esi), %%mm2 \n\t"
-               "addl %%eax, %%esi \n\t"
-               "movq (%%edi), %%mm1 \n\t"
-               "addl %%ebx, %%edi \n\t"
+               "movq (%%"REG_S"), %%mm0 \n\t"
+               "movq (%%"REG_S"), %%mm2 \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "movq (%%"REG_D"), %%mm1 \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
                "psubusb %%mm1, %%mm2 \n\t"
                "psubusb %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
@@ -51,10 +51,10 @@ static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns)
                
                "decl %%ecx \n\t"
                "jnz 1b \n\t"
-               "movq %%mm4, (%%edx) \n\t"
+               "movq %%mm4, (%%"REG_d") \n\t"
                "emms \n\t"
                : 
-               : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+               : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
                : "memory"
                );
        return out[0]+out[1]+out[2]+out[3];
diff --git a/libmpcodecs/vf_divtc.c b/libmpcodecs/vf_divtc.c
index e17600e..d3f287a 100644 (file)
--- a/libmpcodecs/vf_divtc.c
+++ b/libmpcodecs/vf_divtc.c
@@ -44,11 +44,11 @@ static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns)
        ".balign 16 \n\t"
        "1: \n\t"
 
-       "movq (%%esi), %%mm0 \n\t"
-       "movq (%%esi), %%mm2 \n\t"
-       "addl %%eax, %%esi \n\t"
-       "movq (%%edi), %%mm1 \n\t"
-       "addl %%ebx, %%edi \n\t"
+       "movq (%%"REG_S"), %%mm0 \n\t"
+       "movq (%%"REG_S"), %%mm2 \n\t"
+       "add %%"REG_a", %%"REG_S" \n\t"
+       "movq (%%"REG_D"), %%mm1 \n\t"
+       "add %%"REG_b", %%"REG_D" \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm0 \n\t"
@@ -64,10 +64,10 @@ static int diff_MMX(unsigned char *old, unsigned char *new, int os, int ns)
 
        "decl %%ecx \n\t"
        "jnz 1b \n\t"
-       "movq %%mm4, (%%edx) \n\t"
+       "movq %%mm4, (%%"REG_d") \n\t"
        "emms \n\t"
        :
-       : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+       : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
        : "memory"
        );
    return out[0]+out[1]+out[2]+out[3];
diff --git a/libmpcodecs/vf_eq.c b/libmpcodecs/vf_eq.c
index 74395f6..d7adeea 100644 (file)
--- a/libmpcodecs/vf_eq.c
+++ b/libmpcodecs/vf_eq.c
@@ -64,9 +64,9 @@ static void process_MMX(unsigned char *dest, int dstride, unsigned char *src, in
                        "paddw %%mm3, %%mm1 \n\t"
                        "paddw %%mm3, %%mm2 \n\t"
                        "packuswb %%mm2, %%mm1 \n\t"
-                       "addl $8, %0 \n\t"
+                       "add $8, %0 \n\t"
                        "movq %%mm1, (%1) \n\t"
-                       "addl $8, %1 \n\t"
+                       "add $8, %1 \n\t"
                        "decl %%eax \n\t"
                        "jnz 1b \n\t"
                        : "=r" (src), "=r" (dest)
diff --git a/libmpcodecs/vf_eq2.c b/libmpcodecs/vf_eq2.c
index 123bcea..f2641f9 100644 (file)
--- a/libmpcodecs/vf_eq2.c
+++ b/libmpcodecs/vf_eq2.c
@@ -152,9 +152,9 @@ void affine_1d_MMX (eq2_param_t *par, unsigned char *dst, unsigned char *src,
       "paddw %%mm3, %%mm1 \n\t"
       "paddw %%mm3, %%mm2 \n\t"
       "packuswb %%mm2, %%mm1 \n\t"
-      "addl $8, %0 \n\t"
+      "add $8, %0 \n\t"
       "movq %%mm1, (%1) \n\t"
-      "addl $8, %1 \n\t"
+      "add $8, %1 \n\t"
       "decl %%eax \n\t"
       "jnz 1b \n\t"
       : "=r" (src), "=r" (dst)
diff --git a/libmpcodecs/vf_filmdint.c b/libmpcodecs/vf_filmdint.c
index 27a527c..90e25b8 100644 (file)
--- a/libmpcodecs/vf_filmdint.c
+++ b/libmpcodecs/vf_filmdint.c
@@ -406,8 +406,8 @@ block_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs,
            "psllq $16, %%mm0\n\t"                                           \
            "paddusw %%mm0, %%mm7\n\t"                                       \
            "movq (%1), %%mm4\n\t"                                           \
-           "leal (%0,%2,2), %0\n\t"                                         \
-           "leal (%1,%3,2), %1\n\t"                                         \
+           "lea (%0,%2,2), %0\n\t"                                          \
+           "lea (%1,%3,2), %1\n\t"                                          \
            "psubusb %4, %%mm4\n\t"                                          \
            PAVGB(%%mm2, %%mm4)                                              \
            PAVGB(%%mm2, %%mm4)    /* mm4 = qup odd */                       \
@@ -440,7 +440,7 @@ block_metrics_faster_c(unsigned char *a, unsigned char *b, int as, int bs,
            "paddusw %%mm2, %%mm7\n\t"                                       \
            "paddusw %%mm1, %%mm7\n\t"                                       \
            : "=r" (a), "=r" (b)                                             \
-           : "r"(as), "r"(bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \
+           : "r"((long)as), "r"((long)bs), "m" (ones), "0"(a), "1"(b), "X"(*a), "X"(*b) \
            );                                                               \
     } while (--lines);
 
@@ -650,7 +650,7 @@ dint_copy_line_mmx2(unsigned char *dst, unsigned char *a, long bos,
            "por %%mm3, %%mm1 \n\t"     /* avg if >= threshold */
            "movq %%mm1, (%2,%4) \n\t"
            : /* no output */
-           : "r" (a), "r" (bos), "r" (dst), "r" (ss), "r" (ds), "r" (cos)
+           : "r" (a), "r" (bos), "r" (dst), "r" ((long)ss), "r" ((long)ds), "r" (cos)
            );
        a += 8;
        dst += 8;
diff --git a/libmpcodecs/vf_halfpack.c b/libmpcodecs/vf_halfpack.c
index b4fc0e6..900aed6 100644 (file)
--- a/libmpcodecs/vf_halfpack.c
+++ b/libmpcodecs/vf_halfpack.c
@@ -75,13 +75,13 @@ static void halfpack_MMX(unsigned char *dst, unsigned char *src[3],
                        "por %%mm5, %%mm1 \n\t"
                        "por %%mm6, %%mm2 \n\t"
 
-                       "addl $8, %0 \n\t"
-                       "addl $8, %1 \n\t"
-                       "addl $4, %2 \n\t"
-                       "addl $4, %3 \n\t"
+                       "add $8, %0 \n\t"
+                       "add $8, %1 \n\t"
+                       "add $4, %2 \n\t"
+                       "add $4, %3 \n\t"
                        "movq %%mm1, (%8) \n\t"
                        "movq %%mm2, 8(%8) \n\t"
-                       "addl $16, %8 \n\t"
+                       "add $16, %8 \n\t"
                        "decl %9 \n\t"
                        "jnz 1b \n\t"
                        : "=r" (y1), "=r" (y2), "=r" (u), "=r" (v)
diff --git a/libmpcodecs/vf_ilpack.c b/libmpcodecs/vf_ilpack.c
index 66bad26..43c6bad 100644 (file)
--- a/libmpcodecs/vf_ilpack.c
+++ b/libmpcodecs/vf_ilpack.c
@@ -76,12 +76,12 @@ static void pack_nn_MMX(unsigned char *dst, unsigned char *y,
                "punpcklbw %%mm4, %%mm1 \n\t"
                "punpckhbw %%mm4, %%mm2 \n\t"
                
-               "addl $8, %0 \n\t"
-               "addl $4, %1 \n\t"
-               "addl $4, %2 \n\t"
+               "add $8, %0 \n\t"
+               "add $4, %1 \n\t"
+               "add $4, %2 \n\t"
                "movq %%mm1, (%3) \n\t"
                "movq %%mm2, 8(%3) \n\t"
-               "addl $16, %3 \n\t"
+               "add $16, %3 \n\t"
                "decl %4 \n\t"
                "jnz 1b \n\t"
                "emms \n\t"
@@ -96,22 +96,26 @@ static void pack_li_0_MMX(unsigned char *dst, unsigned char *y,
        unsigned char *u, unsigned char *v, int w, int us, int vs)
 {
        asm volatile (""
-               "pushl %%ebp \n\t"
-               "movl 4(%%edx), %%ebp \n\t"
-               "movl (%%edx), %%edx \n\t"
+               "push %%"REG_BP" \n\t"
+#ifdef ARCH_X86_64
+               "mov %6, %%"REG_BP" \n\t"
+#else
+               "movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+               "movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
                "pxor %%mm0, %%mm0 \n\t"
                
                ".balign 16 \n\t"
                ".Lli0: \n\t"
-               "movq (%%esi), %%mm1 \n\t"
-               "movq (%%esi), %%mm2 \n\t"
+               "movq (%%"REG_S"), %%mm1 \n\t"
+               "movq (%%"REG_S"), %%mm2 \n\t"
                
-               "movq (%%eax,%%edx,2), %%mm4 \n\t"
-               "movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+               "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+               "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
                "punpcklbw %%mm0, %%mm4 \n\t"
                "punpcklbw %%mm0, %%mm6 \n\t"
-               "movq (%%eax), %%mm3 \n\t"
-               "movq (%%ebx), %%mm5 \n\t"
+               "movq (%%"REG_a"), %%mm3 \n\t"
+               "movq (%%"REG_b"), %%mm5 \n\t"
                "punpcklbw %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm0, %%mm5 \n\t"
                "paddw %%mm3, %%mm4 \n\t"
@@ -136,18 +140,18 @@ static void pack_li_0_MMX(unsigned char *dst, unsigned char *y,
                "punpcklbw %%mm4, %%mm1 \n\t"
                "punpckhbw %%mm4, %%mm2 \n\t"
                
-               "movq %%mm1, (%%edi) \n\t"
-               "movq %%mm2, 8(%%edi) \n\t"
+               "movq %%mm1, (%%"REG_D") \n\t"
+               "movq %%mm2, 8(%%"REG_D") \n\t"
                
-               "movq 8(%%esi), %%mm1 \n\t"
-               "movq 8(%%esi), %%mm2 \n\t"
+               "movq 8(%%"REG_S"), %%mm1 \n\t"
+               "movq 8(%%"REG_S"), %%mm2 \n\t"
                
-               "movq (%%eax,%%edx,2), %%mm4 \n\t"
-               "movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+               "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+               "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
                "punpckhbw %%mm0, %%mm4 \n\t"
                "punpckhbw %%mm0, %%mm6 \n\t"
-               "movq (%%eax), %%mm3 \n\t"
-               "movq (%%ebx), %%mm5 \n\t"
+               "movq (%%"REG_a"), %%mm3 \n\t"
+               "movq (%%"REG_b"), %%mm5 \n\t"
                "punpckhbw %%mm0, %%mm3 \n\t"
                "punpckhbw %%mm0, %%mm5 \n\t"
                "paddw %%mm3, %%mm4 \n\t"
@@ -172,20 +176,25 @@ static void pack_li_0_MMX(unsigned char *dst, unsigned char *y,
                "punpcklbw %%mm4, %%mm1 \n\t"
                "punpckhbw %%mm4, %%mm2 \n\t"
                
-               "addl $16, %%esi \n\t"
-               "addl $8, %%eax \n\t"
-               "addl $8, %%ebx \n\t"
+               "add $16, %%"REG_S" \n\t"
+               "add $8, %%"REG_a" \n\t"
+               "add $8, %%"REG_b" \n\t"
                
-               "movq %%mm1, 16(%%edi) \n\t"
-               "movq %%mm2, 24(%%edi) \n\t"
-               "addl $32, %%edi \n\t"
+               "movq %%mm1, 16(%%"REG_D") \n\t"
+               "movq %%mm2, 24(%%"REG_D") \n\t"
+               "add $32, %%"REG_D" \n\t"
                
                "decl %%ecx \n\t"
                "jnz .Lli0 \n\t"
                "emms \n\t"
-               "popl %%ebp \n\t"
+               "pop %%"REG_BP" \n\t"
                : 
-               : "S" (y), "D" (dst), "a" (u), "b" (v), "d" (&us), "c" (w/16)
+               : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#ifdef ARCH_X86_64
+               "d" ((long)us), "r" ((long)vs)
+#else
+               "d" (&us)
+#endif
                : "memory"
                );
        pack_li_0_C(dst, y, u, v, (w&15), us, vs);
@@ -195,22 +204,26 @@ static void pack_li_1_MMX(unsigned char *dst, unsigned char *y,
        unsigned char *u, unsigned char *v, int w, int us, int vs)
 {
        asm volatile (""
-               "pushl %%ebp \n\t"
-               "movl 4(%%edx), %%ebp \n\t"
-               "movl (%%edx), %%edx \n\t"
+               "push %%"REG_BP" \n\t"
+#ifdef ARCH_X86_64
+               "mov %6, %%"REG_BP" \n\t"
+#else
+               "movl 4(%%"REG_d"), %%"REG_BP" \n\t"
+               "movl (%%"REG_d"), %%"REG_d" \n\t"
+#endif
                "pxor %%mm0, %%mm0 \n\t"
                
                ".balign 16 \n\t"
                ".Lli1: \n\t"
-               "movq (%%esi), %%mm1 \n\t"
-               "movq (%%esi), %%mm2 \n\t"
+               "movq (%%"REG_S"), %%mm1 \n\t"
+               "movq (%%"REG_S"), %%mm2 \n\t"
                
-               "movq (%%eax,%%edx,2), %%mm4 \n\t"
-               "movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+               "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+               "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
                "punpcklbw %%mm0, %%mm4 \n\t"
                "punpcklbw %%mm0, %%mm6 \n\t"
-               "movq (%%eax), %%mm3 \n\t"
-               "movq (%%ebx), %%mm5 \n\t"
+               "movq (%%"REG_a"), %%mm3 \n\t"
+               "movq (%%"REG_b"), %%mm5 \n\t"
                "punpcklbw %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm0, %%mm5 \n\t"
                "movq %%mm4, %%mm7 \n\t"
@@ -237,18 +250,18 @@ static void pack_li_1_MMX(unsigned char *dst, unsigned char *y,
                "punpcklbw %%mm4, %%mm1 \n\t"
                "punpckhbw %%mm4, %%mm2 \n\t"
                
-               "movq %%mm1, (%%edi) \n\t"
-               "movq %%mm2, 8(%%edi) \n\t"
+               "movq %%mm1, (%%"REG_D") \n\t"
+               "movq %%mm2, 8(%%"REG_D") \n\t"
                
-               "movq 8(%%esi), %%mm1 \n\t"
-               "movq 8(%%esi), %%mm2 \n\t"
+               "movq 8(%%"REG_S"), %%mm1 \n\t"
+               "movq 8(%%"REG_S"), %%mm2 \n\t"
                
-               "movq (%%eax,%%edx,2), %%mm4 \n\t"
-               "movq (%%ebx,%%ebp,2), %%mm6 \n\t"
+               "movq (%%"REG_a",%%"REG_d",2), %%mm4 \n\t"
+               "movq (%%"REG_b",%%"REG_BP",2), %%mm6 \n\t"
                "punpckhbw %%mm0, %%mm4 \n\t"
                "punpckhbw %%mm0, %%mm6 \n\t"
-               "movq (%%eax), %%mm3 \n\t"
-               "movq (%%ebx), %%mm5 \n\t"
+               "movq (%%"REG_a"), %%mm3 \n\t"
+               "movq (%%"REG_b"), %%mm5 \n\t"
                "punpckhbw %%mm0, %%mm3 \n\t"
                "punpckhbw %%mm0, %%mm5 \n\t"
                "movq %%mm4, %%mm7 \n\t"
@@ -275,20 +288,25 @@ static void pack_li_1_MMX(unsigned char *dst, unsigned char *y,
                "punpcklbw %%mm4, %%mm1 \n\t"
                "punpckhbw %%mm4, %%mm2 \n\t"
                
-               "addl $16, %%esi \n\t"
-               "addl $8, %%eax \n\t"
-               "addl $8, %%ebx \n\t"
+               "add $16, %%"REG_S" \n\t"
+               "add $8, %%"REG_a" \n\t"
+               "add $8, %%"REG_b" \n\t"
                
-               "movq %%mm1, 16(%%edi) \n\t"
-               "movq %%mm2, 24(%%edi) \n\t"
-               "addl $32, %%edi \n\t"
+               "movq %%mm1, 16(%%"REG_D") \n\t"
+               "movq %%mm2, 24(%%"REG_D") \n\t"
+               "add $32, %%"REG_D" \n\t"
                
                "decl %%ecx \n\t"
                "jnz .Lli1 \n\t"
                "emms \n\t"
-               "popl %%ebp \n\t"
+               "pop %%"REG_BP" \n\t"
                : 
-               : "S" (y), "D" (dst), "a" (u), "b" (v), "d" (&us), "c" (w/16)
+               : "S" (y), "D" (dst), "a" (u), "b" (v), "c" (w/16),
+#ifdef ARCH_X86_64
+               "d" ((long)us), "r" ((long)vs)
+#else
+               "d" (&us)
+#endif
                : "memory"
                );
        pack_li_1_C(dst, y, u, v, (w&15), us, vs);
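
pack_li_0_MMX()/pack_li_1_MMX() are the one place where the conversion is more than mechanical. On i386 the asm already occupies eax, ebx, ecx, edx, esi and edi and still needs ebp as a seventh pointer, so us and vs used to be smuggled in through a single pointer ("d" (&us)) and ebp was loaded from 4(%edx); on x86_64 there are registers to spare, so vs is passed as its own "r" operand (%6) and simply moved into REG_BP. A stripped-down skeleton of just that prologue and operand layout (hypothetical function; assumes the cpudetect.h macros and, on i386, the same "us and vs are adjacent in memory" trick the original relies on):

/* hypothetical skeleton: only the prologue and the operand lists */
static void two_strides_skeleton(unsigned char *u, unsigned char *v,
                                 int us, int vs)
{
    __asm__ __volatile__(
        "push %%"REG_BP"                \n\t"
#ifdef ARCH_X86_64
        "mov  %3, %%"REG_BP"            \n\t"  /* vs has its own register operand */
#else
        "movl 4(%%"REG_d"), %%"REG_BP"  \n\t"  /* vs = ((int *)&us)[1] */
        "movl (%%"REG_d"), %%"REG_d"    \n\t"  /* edx = us */
#endif
        /* ...body would address (%%REG_a,%%REG_d,2) and (%%REG_b,%%REG_BP,2)... */
        "pop  %%"REG_BP"                \n\t"
        :
        : "a" (u), "b" (v),
#ifdef ARCH_X86_64
          "d" ((long)us), "r" ((long)vs)       /* %2 and %3 */
#else
          "d" (&us)
#endif
        : "memory");
}
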
diff --git a/libmpcodecs/vf_ivtc.c b/libmpcodecs/vf_ivtc.c
index 804f68a..3fb00e5 100644 (file)
--- a/libmpcodecs/vf_ivtc.c
+++ b/libmpcodecs/vf_ivtc.c
@@ -71,11 +71,11 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                "1: \n\t"
                
                // Even difference
-               "movq (%%esi), %%mm0 \n\t"
-               "movq (%%esi), %%mm2 \n\t"
-               "addl %%eax, %%esi \n\t"
-               "movq (%%edi), %%mm1 \n\t"
-               "addl %%ebx, %%edi \n\t"
+               "movq (%%"REG_S"), %%mm0 \n\t"
+               "movq (%%"REG_S"), %%mm2 \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "movq (%%"REG_D"), %%mm1 \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
                "psubusb %%mm1, %%mm2 \n\t"
                "psubusb %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
@@ -90,11 +90,11 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                "paddw %%mm3, %%mm4 \n\t"
                
                // Odd difference
-               "movq (%%esi), %%mm0 \n\t"
-               "movq (%%esi), %%mm2 \n\t"
-               "addl %%eax, %%esi \n\t"
-               "movq (%%edi), %%mm1 \n\t"
-               "addl %%ebx, %%edi \n\t"
+               "movq (%%"REG_S"), %%mm0 \n\t"
+               "movq (%%"REG_S"), %%mm2 \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "movq (%%"REG_D"), %%mm1 \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
                "psubusb %%mm1, %%mm2 \n\t"
                "psubusb %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm0 \n\t"
@@ -110,8 +110,8 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                        
                "decl %%ecx \n\t"
                "jnz 1b \n\t"
-               "movq %%mm4, (%%edx) \n\t"
-               "movq %%mm5, 8(%%edx) \n\t"
+               "movq %%mm4, (%%"REG_d") \n\t"
+               "movq %%mm5, 8(%%"REG_d") \n\t"
                : 
                : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
                : "memory"
@@ -130,14 +130,14 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                ".balign 16 \n\t"
                "2: \n\t"
                
-               "movq (%%esi), %%mm0 \n\t"
-               "movq (%%esi,%%eax), %%mm1 \n\t"
-               "addl %%eax, %%esi \n\t"
-               "addl %%eax, %%esi \n\t"
-               "movq (%%edi), %%mm2 \n\t"
-               "movq (%%edi,%%ebx), %%mm3 \n\t"
-               "addl %%ebx, %%edi \n\t"
-               "addl %%ebx, %%edi \n\t"
+               "movq (%%"REG_S"), %%mm0 \n\t"
+               "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "movq (%%"REG_D"), %%mm2 \n\t"
+               "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
@@ -164,16 +164,16 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                "psubw %%mm1, %%mm4 \n\t"
                "psubw %%mm2, %%mm5 \n\t"
                "psubw %%mm3, %%mm6 \n\t"
-               "movq %%mm4, (%%edx) \n\t"
-               "movq %%mm5, 16(%%edx) \n\t"
-               "movq %%mm6, 32(%%edx) \n\t"
+               "movq %%mm4, (%%"REG_d") \n\t"
+               "movq %%mm5, 16(%%"REG_d") \n\t"
+               "movq %%mm6, 32(%%"REG_d") \n\t"
 
-               "movl %%eax, %%ecx \n\t"
-               "shll $3, %%ecx \n\t"
-               "subl %%ecx, %%esi \n\t"
-               "movl %%ebx, %%ecx \n\t"
-               "shll $3, %%ecx \n\t"
-               "subl %%ecx, %%edi \n\t"
+               "mov %%"REG_a", %%"REG_c" \n\t"
+               "shl $3, %%"REG_c" \n\t"
+               "sub %%"REG_c", %%"REG_S" \n\t"
+               "mov %%"REG_b", %%"REG_c" \n\t"
+               "shl $3, %%"REG_c" \n\t"
+               "sub %%"REG_c", %%"REG_D" \n\t"
 
                // Second loop for the last four columns
                "movl $4, %%ecx \n\t"
@@ -184,14 +184,14 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                ".balign 16 \n\t"
                "3: \n\t"
                
-               "movq (%%esi), %%mm0 \n\t"
-               "movq (%%esi,%%eax), %%mm1 \n\t"
-               "addl %%eax, %%esi \n\t"
-               "addl %%eax, %%esi \n\t"
-               "movq (%%edi), %%mm2 \n\t"
-               "movq (%%edi,%%ebx), %%mm3 \n\t"
-               "addl %%ebx, %%edi \n\t"
-               "addl %%ebx, %%edi \n\t"
+               "movq (%%"REG_S"), %%mm0 \n\t"
+               "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "add %%"REG_a", %%"REG_S" \n\t"
+               "movq (%%"REG_D"), %%mm2 \n\t"
+               "movq (%%"REG_D",%%"REG_b"), %%mm3 \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
+               "add %%"REG_b", %%"REG_D" \n\t"
                "punpckhbw %%mm7, %%mm0 \n\t"
                "punpckhbw %%mm7, %%mm1 \n\t"
                "punpckhbw %%mm7, %%mm2 \n\t"
@@ -218,13 +218,13 @@ static void block_diffs_MMX(struct metrics *m, unsigned char *old, unsigned char
                "psubw %%mm1, %%mm4 \n\t"
                "psubw %%mm2, %%mm5 \n\t"
                "psubw %%mm3, %%mm6 \n\t"
-               "movq %%mm4, 8(%%edx) \n\t"
-               "movq %%mm5, 24(%%edx) \n\t"
-               "movq %%mm6, 40(%%edx) \n\t"
+               "movq %%mm4, 8(%%"REG_d") \n\t"
+               "movq %%mm5, 24(%%"REG_d") \n\t"
+               "movq %%mm6, 40(%%"REG_d") \n\t"
 
                "emms \n\t"
                : 
-               : "S" (old), "D" (new), "a" (os), "b" (ns), "d" (out)
+               : "S" (old), "D" (new), "a" ((long)os), "b" ((long)ns), "d" (out)
                : "memory"
                );
        m->p = m->t = m->s = 0;
diff --git a/libmpcodecs/vf_noise.c b/libmpcodecs/vf_noise.c
index 14ad8f9..c8f669b 100644 (file)
--- a/libmpcodecs/vf_noise.c
+++ b/libmpcodecs/vf_noise.c
@@ -143,26 +143,26 @@ static int8_t *initNoise(FilterParam *fp){
 
 #ifdef HAVE_MMX
 static inline void lineNoise_MMX(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
-       int mmx_len= len&(~7);
+       long mmx_len= len&(~7);
        noise+=shift;
 
        asm volatile(
-               "movl %3, %%eax                 \n\t"
+               "mov %3, %%"REG_a"              \n\t"
                "pcmpeqb %%mm7, %%mm7           \n\t"
                "psllw $15, %%mm7               \n\t"
                "packsswb %%mm7, %%mm7          \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               "movq (%0, %%eax), %%mm0        \n\t"
-               "movq (%1, %%eax), %%mm1        \n\t"
+               "movq (%0, %%"REG_a"), %%mm0    \n\t"
+               "movq (%1, %%"REG_a"), %%mm1    \n\t"
                "pxor %%mm7, %%mm0              \n\t"
                "paddsb %%mm1, %%mm0            \n\t"
                "pxor %%mm7, %%mm0              \n\t"
-               "movq %%mm0, (%2, %%eax)        \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movq %%mm0, (%2, %%"REG_a")    \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
                :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
-               : "%eax"
+               : "%"REG_a
        );
        if(mmx_len!=len)
                lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
@@ -172,26 +172,26 @@ static inline void lineNoise_MMX(uint8_t *dst, uint8_t *src, int8_t *noise, int
 //duplicate of previous except movntq
 #ifdef HAVE_MMX2
 static inline void lineNoise_MMX2(uint8_t *dst, uint8_t *src, int8_t *noise, int len, int shift){
-       int mmx_len= len&(~7);
+       long mmx_len= len&(~7);
        noise+=shift;
 
        asm volatile(
-               "movl %3, %%eax                 \n\t"
+               "mov %3, %%"REG_a"              \n\t"
                "pcmpeqb %%mm7, %%mm7           \n\t"
                "psllw $15, %%mm7               \n\t"
                "packsswb %%mm7, %%mm7          \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               "movq (%0, %%eax), %%mm0        \n\t"
-               "movq (%1, %%eax), %%mm1        \n\t"
+               "movq (%0, %%"REG_a"), %%mm0    \n\t"
+               "movq (%1, %%"REG_a"), %%mm1    \n\t"
                "pxor %%mm7, %%mm0              \n\t"
                "paddsb %%mm1, %%mm0            \n\t"
                "pxor %%mm7, %%mm0              \n\t"
-               "movntq %%mm0, (%2, %%eax)      \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movntq %%mm0, (%2, %%"REG_a")  \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
                :: "r" (src+mmx_len), "r" (noise+mmx_len), "r" (dst+mmx_len), "g" (-mmx_len)
-               : "%eax"
+               : "%"REG_a
        );
        if(mmx_len!=len)
                lineNoise_C(dst+mmx_len, src+mmx_len, noise+mmx_len, len-mmx_len, 0);
@@ -214,16 +214,16 @@ static inline void lineNoise_C(uint8_t *dst, uint8_t *src, int8_t *noise, int le
 
 #ifdef HAVE_MMX
 static inline void lineNoiseAvg_MMX(uint8_t *dst, uint8_t *src, int len, int8_t **shift){
-       int mmx_len= len&(~7);
+       long mmx_len= len&(~7);
 
        asm volatile(
-               "movl %5, %%eax                 \n\t"
+               "mov %5, %%"REG_a"              \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               "movq (%1, %%eax), %%mm1        \n\t"
-               "movq (%0, %%eax), %%mm0        \n\t"
-               "paddb (%2, %%eax), %%mm1       \n\t"
-               "paddb (%3, %%eax), %%mm1       \n\t"
+               "movq (%1, %%"REG_a"), %%mm1    \n\t"
+               "movq (%0, %%"REG_a"), %%mm0    \n\t"
+               "paddb (%2, %%"REG_a"), %%mm1   \n\t"
+               "paddb (%3, %%"REG_a"), %%mm1   \n\t"
                "movq %%mm0, %%mm2              \n\t"
                "movq %%mm1, %%mm3              \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
@@ -239,12 +239,12 @@ static inline void lineNoiseAvg_MMX(uint8_t *dst, uint8_t *src, int len, int8_t
                "psrlw $8, %%mm1                \n\t"
                "psrlw $8, %%mm3                \n\t"
                 "packuswb %%mm3, %%mm1         \n\t"
-               "movq %%mm1, (%4, %%eax)        \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movq %%mm1, (%4, %%"REG_a")    \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
                :: "r" (src+mmx_len), "r" (shift[0]+mmx_len), "r" (shift[1]+mmx_len), "r" (shift[2]+mmx_len), 
                    "r" (dst+mmx_len), "g" (-mmx_len)
-               : "%eax"
+               : "%"REG_a
        );
 
        if(mmx_len!=len){
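
The lineNoise_* loops run a negative byte offset up towards zero ("add $8 ; js 1b") against base pointers that already sit one past the end of the buffers; since that offset is added straight into 64-bit addresses, mmx_len has to become long. The pxor/paddsb/pxor triple with the 0x80 pattern in mm7 is a bias trick that yields the unsigned-saturating sum of a pixel and a signed noise byte. A plain-C picture of the same indexing and arithmetic (illustrative sketch only, not the filter's lineNoise_C fallback):

/* plain-C rendering of the negative-offset idiom used by lineNoise_MMX/MMX2 */
static void line_noise_sketch(unsigned char *dst, const unsigned char *src,
                              const signed char *noise, int len)
{
    long mmx_len = len & ~7;                  /* pointer-sized, as in the patch */
    const unsigned char *s = src + mmx_len;   /* bases point past the end */
    const signed char   *n = noise + mmx_len;
    unsigned char       *d = dst + mmx_len;
    long i;

    for (i = -mmx_len; i; i += 8) {           /* "add $8, %%REG_a ; js 1b" */
        int k;
        for (k = 0; k < 8; k++) {             /* one movq covers 8 bytes */
            int v = s[i + k] + n[i + k];      /* pxor/paddsb/pxor == saturating add */
            d[i + k] = v < 0 ? 0 : v > 255 ? 255 : (unsigned char)v;
        }
    }
    /* the (len & 7) tail is handed to lineNoise_C in the real filter */
}
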
diff --git a/libmpcodecs/vf_spp.c b/libmpcodecs/vf_spp.c
index 0447ab9..6cc147b 100644 (file)
--- a/libmpcodecs/vf_spp.c
+++ b/libmpcodecs/vf_spp.c
@@ -357,9 +357,9 @@ static void store_slice_mmx(uint8_t *dst, int16_t *src, int dst_stride, int src_
                        "psraw %%mm2, %%mm1     \n\t"
                        "packuswb %%mm1, %%mm0  \n\t"
                        "movq %%mm0, (%1)       \n\t"
-                       "addl $16, %0           \n\t"
-                       "addl $8, %1            \n\t"
-                       "cmpl %2, %1            \n\t"
+                       "add $16, %0            \n\t"
+                       "add $8, %1             \n\t"
+                       "cmp %2, %1             \n\t"
                        " jb 1b                 \n\t"
                        : "+r" (src1), "+r"(dst1)
                        : "r"(dst + width), "r"(dither[y]), "g"(log2_scale), "g"(6-log2_scale)
diff --git a/libmpcodecs/vf_tfields.c b/libmpcodecs/vf_tfields.c
index 779fb67..76211d9 100644 (file)
--- a/libmpcodecs/vf_tfields.c
+++ b/libmpcodecs/vf_tfields.c
@@ -61,7 +61,7 @@ static void deint(unsigned char *dest, int ds, unsigned char *src, int ss, int w
 static void qpel_li_3DNOW(unsigned char *d, unsigned char *s, int w, int h, int ds, int ss, int up)
 {
        int i, j, ssd=ss;
-       int crap1, crap2;
+       long crap1, crap2;
        if (up) {
                ssd = -ss;
                memcpy(d, s, w);
@@ -71,17 +71,17 @@ static void qpel_li_3DNOW(unsigned char *d, unsigned char *s, int w, int h, int
        for (i=h-1; i; i--) {
                asm volatile(
                        "1: \n\t"
-                       "movq (%%esi), %%mm0 \n\t"
-                       "movq (%%esi,%%eax), %%mm1 \n\t"
+                       "movq (%%"REG_S"), %%mm0 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
                        "pavgusb %%mm0, %%mm1 \n\t"
-                       "addl $8, %%esi \n\t"
+                       "add $8, %%"REG_S" \n\t"
                        "pavgusb %%mm0, %%mm1 \n\t"
-                       "movq %%mm1, (%%edi) \n\t"
-                       "addl $8, %%edi \n\t"
+                       "movq %%mm1, (%%"REG_D") \n\t"
+                       "add $8, %%"REG_D" \n\t"
                        "decl %%ecx \n\t"
                        "jnz 1b \n\t"
                        : "=S"(crap1), "=D"(crap2)
-                       : "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+                       : "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
                );
                for (j=w-(w&7); j<w; j++)
                        d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -97,7 +97,7 @@ static void qpel_li_3DNOW(unsigned char *d, unsigned char *s, int w, int h, int
 static void qpel_li_MMX2(unsigned char *d, unsigned char *s, int w, int h, int ds, int ss, int up)
 {
        int i, j, ssd=ss;
-       int crap1, crap2;
+       long crap1, crap2;
        if (up) {
                ssd = -ss;
                memcpy(d, s, w);
@@ -108,17 +108,17 @@ static void qpel_li_MMX2(unsigned char *d, unsigned char *s, int w, int h, int d
                asm volatile(
                        "pxor %%mm7, %%mm7 \n\t"
                        "2: \n\t"
-                       "movq (%%esi), %%mm0 \n\t"
-                       "movq (%%esi,%%eax), %%mm1 \n\t"
+                       "movq (%%"REG_S"), %%mm0 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
                        "pavgb %%mm0, %%mm1 \n\t"
-                       "addl $8, %%esi \n\t"
+                       "add $8, %%"REG_S" \n\t"
                        "pavgb %%mm0, %%mm1 \n\t"
-                       "movq %%mm1, (%%edi) \n\t"
-                       "addl $8, %%edi \n\t"
+                       "movq %%mm1, (%%"REG_D") \n\t"
+                       "add $8, %%"REG_D" \n\t"
                        "decl %%ecx \n\t"
                        "jnz 2b \n\t"
                        : "=S"(crap1), "=D"(crap2)
-                       : "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+                       : "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
                );
                for (j=w-(w&7); j<w; j++)
                        d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -145,11 +145,11 @@ static void qpel_li_MMX(unsigned char *d, unsigned char *s, int w, int h, int ds
                asm volatile(
                        "pxor %%mm7, %%mm7 \n\t"
                        "3: \n\t"
-                       "movq (%%esi), %%mm0 \n\t"
-                       "movq (%%esi), %%mm1 \n\t"
-                       "movq (%%esi,%%eax), %%mm2 \n\t"
-                       "movq (%%esi,%%eax), %%mm3 \n\t"
-                       "addl $8, %%esi \n\t"
+                       "movq (%%"REG_S"), %%mm0 \n\t"
+                       "movq (%%"REG_S"), %%mm1 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm3 \n\t"
+                       "add $8, %%"REG_S" \n\t"
                        "punpcklbw %%mm7, %%mm0 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t"
                        "punpcklbw %%mm7, %%mm2 \n\t"
@@ -163,12 +163,12 @@ static void qpel_li_MMX(unsigned char *d, unsigned char *s, int w, int h, int ds
                        "psrlw $2, %%mm2 \n\t"
                        "psrlw $2, %%mm3 \n\t"
                        "packsswb %%mm3, %%mm2 \n\t"
-                       "movq %%mm2, (%%edi) \n\t"
-                       "addl $8, %%edi \n\t"
+                       "movq %%mm2, (%%"REG_D") \n\t"
+                       "add $8, %%"REG_D" \n\t"
                        "decl %%ecx \n\t"
                        "jnz 3b \n\t"
                        : "=S"(crap1), "=D"(crap2)
-                       : "c"(w>>3), "S"(s), "D"(d), "a"(ssd)
+                       : "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd)
                );
                for (j=w-(w&7); j<w; j++)
                        d[j] = (s[j+ssd] + 3*s[j])>>2;
@@ -198,15 +198,15 @@ static void qpel_4tap_MMX(unsigned char *d, unsigned char *s, int w, int h, int
        for (i=h-3; i; i--) {
                asm volatile(
                        "pxor %%mm0, %%mm0 \n\t"
-                       "movq (%%edx), %%mm4 \n\t"
-                       "movq 8(%%edx), %%mm5 \n\t"
-                       "movq 16(%%edx), %%mm6 \n\t"
-                       "movq 24(%%edx), %%mm7 \n\t"
+                       "movq (%%"REG_d"), %%mm4 \n\t"
+                       "movq 8(%%"REG_d"), %%mm5 \n\t"
+                       "movq 16(%%"REG_d"), %%mm6 \n\t"
+                       "movq 24(%%"REG_d"), %%mm7 \n\t"
                        "4: \n\t"
 
-                       "movq (%%esi,%%eax), %%mm1 \n\t"
-                       "movq (%%esi), %%mm2 \n\t"
-                       "movq (%%esi,%%ebx), %%mm3 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm1 \n\t"
+                       "movq (%%"REG_S"), %%mm2 \n\t"
+                       "movq (%%"REG_S",%%"REG_b"), %%mm3 \n\t"
                        "punpcklbw %%mm0, %%mm1 \n\t"
                        "punpcklbw %%mm0, %%mm2 \n\t"
                        "pmullw %%mm4, %%mm1 \n\t"
@@ -214,38 +214,38 @@ static void qpel_4tap_MMX(unsigned char *d, unsigned char *s, int w, int h, int
                        "pmullw %%mm5, %%mm2 \n\t"
                        "paddusw %%mm2, %%mm1 \n\t"
                        "pmullw %%mm6, %%mm3 \n\t"
-                       "movq (%%esi,%%eax,2), %%mm2 \n\t"
+                       "movq (%%"REG_S",%%"REG_a",2), %%mm2 \n\t"
                        "psubusw %%mm3, %%mm1 \n\t"
                        "punpcklbw %%mm0, %%mm2 \n\t"   
                        "pmullw %%mm7, %%mm2 \n\t"
                        "psubusw %%mm2, %%mm1 \n\t"
                        "psrlw $7, %%mm1 \n\t"
 
-                       "movq (%%esi,%%eax), %%mm2 \n\t"
-                       "movq (%%esi), %%mm3 \n\t"
+                       "movq (%%"REG_S",%%"REG_a"), %%mm2 \n\t"
+                       "movq (%%"REG_S"), %%mm3 \n\t"
                        "punpckhbw %%mm0, %%mm2 \n\t"
                        "punpckhbw %%mm0, %%mm3 \n\t"
                        "pmullw %%mm4, %%mm2 \n\t"
                        "pmullw %%mm5, %%mm3 \n\t"
                        "paddusw %%mm3, %%mm2 \n\t"
-                       "movq (%%esi,%%ebx), %%mm3 \n\t"
+                       "movq (%%"REG_S",%%"REG_b"), %%mm3 \n\t"
                        "punpckhbw %%mm0, %%mm3 \n\t"
                        "pmullw %%mm6, %%mm3 \n\t"
                        "psubusw %%mm3, %%mm2 \n\t"
-                       "movq (%%esi,%%eax,2), %%mm3 \n\t"
+                       "movq (%%"REG_S",%%"REG_a",2), %%mm3 \n\t"
                        "punpckhbw %%mm0, %%mm3 \n\t"   
-                       "addl $8, %%esi \n\t"
+                       "add $8, %%"REG_S" \n\t"
                        "pmullw %%mm7, %%mm3 \n\t"
                        "psubusw %%mm3, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        
                        "packuswb %%mm2, %%mm1 \n\t"
-                       "movq %%mm1, (%%edi) \n\t"
-                       "addl $8, %%edi \n\t"
+                       "movq %%mm1, (%%"REG_D") \n\t"
+                       "add $8, %%"REG_D" \n\t"
                        "decl %%ecx \n\t"
                        "jnz 4b \n\t"
                        : "=S"(crap1), "=D"(crap2)
-                       : "c"(w>>3), "S"(s), "D"(d), "a"(ssd), "b"(-ssd), "d"(filter)
+                       : "c"(w>>3), "S"(s), "D"(d), "a"((long)ssd), "b"((long)-ssd), "d"(filter)
                );
                for (j=w-(w&7); j<w; j++)
                        d[j] = (-9*s[j-ssd] + 111*s[j] + 29*s[j+ssd] - 3*s[j+ssd+ssd])>>7;
diff --git a/libvo/aclib.c b/libvo/aclib.c
index e2a1922..a3330ea 100644 (file)
--- a/libvo/aclib.c
+++ b/libvo/aclib.c
@@ -17,7 +17,7 @@
 //Feel free to fine-tune the above 2, it might be possible to get some speedup with them :)
 
 //#define STATISTICS
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #define CAN_COMPILE_X86_ASM
 #endif
 
@@ -50,7 +50,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#undef ARCH_X86
 /*
 #ifdef COMPILE_C
 #undef HAVE_MMX
@@ -69,7 +68,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "aclib_template.c"
 #endif
@@ -82,7 +80,6 @@
 #undef HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "aclib_template.c"
 #endif
@@ -95,7 +92,6 @@
 #define HAVE_3DNOW
 #undef HAVE_SSE
 #undef HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _3DNow
 #include "aclib_template.c"
 #endif
 #undef HAVE_3DNOW
 #define HAVE_SSE
 #define HAVE_SSE2
-#define ARCH_X86
 #define RENAME(a) a ## _SSE
 #include "aclib_template.c"
 #endif
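
aclib.c compiles aclib_template.c once per CPU flavour, renaming each pass with RENAME(); the only thing meant to differ between passes is the set of HAVE_* feature macros. Force-defining ARCH_X86 on every pass was harmless on i386 but would have mislabeled an x86_64 build, so those lines are dropped and the template now sees the real arch macros (and the REG_* names) untouched. Condensed, the structure is roughly:

/* condensed shape of libvo/aclib.c after this change: one template body,
 * included once per flavour, arch macros left alone */
#undef  HAVE_MMX
#undef  HAVE_MMX2
#undef  HAVE_3DNOW
#undef  HAVE_SSE
#define RENAME(a) a ## _C
#include "aclib_template.c"        /* plain C versions */

#undef  RENAME
#define HAVE_MMX
#define RENAME(a) a ## _MMX
#include "aclib_template.c"        /* fast_memcpy_MMX(), ... */

#undef  RENAME
#define HAVE_MMX2
#define RENAME(a) a ## _MMX2
#include "aclib_template.c"        /* and likewise for _3DNow and _SSE */

osd.c and rgb2rgb.c below use the same scheme and get the same cleanup.
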
diff --git a/libvo/aclib_template.c b/libvo/aclib_template.c
index 54b420e..0b50f7e 100644 (file)
--- a/libvo/aclib_template.c
+++ b/libvo/aclib_template.c
@@ -257,62 +257,62 @@ static void * RENAME(fast_memcpy)(void * to, const void * from, size_t len)
        // Pure Assembly cuz gcc is a bit unpredictable ;)
        if(i>=BLOCK_SIZE/64)
                asm volatile(
-                       "xorl %%eax, %%eax      \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        ".balign 16             \n\t"
                        "1:                     \n\t"
-                               "movl (%0, %%eax), %%ebx        \n\t"
-                               "movl 32(%0, %%eax), %%ebx      \n\t"
-                               "movl 64(%0, %%eax), %%ebx      \n\t"
-                               "movl 96(%0, %%eax), %%ebx      \n\t"
-                               "addl $128, %%eax               \n\t"
-                               "cmpl %3, %%eax                 \n\t"
+                               "movl (%0, %%"REG_a"), %%ebx    \n\t"
+                               "movl 32(%0, %%"REG_a"), %%ebx  \n\t"
+                               "movl 64(%0, %%"REG_a"), %%ebx  \n\t"
+                               "movl 96(%0, %%"REG_a"), %%ebx  \n\t"
+                               "add $128, %%"REG_a"            \n\t"
+                               "cmp %3, %%"REG_a"              \n\t"
                                " jb 1b                         \n\t"
 
-                       "xorl %%eax, %%eax      \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
 
                                ".balign 16             \n\t"
                                "2:                     \n\t"
-                               "movq (%0, %%eax), %%mm0\n"
-                               "movq 8(%0, %%eax), %%mm1\n"
-                               "movq 16(%0, %%eax), %%mm2\n"
-                               "movq 24(%0, %%eax), %%mm3\n"
-                               "movq 32(%0, %%eax), %%mm4\n"
-                               "movq 40(%0, %%eax), %%mm5\n"
-                               "movq 48(%0, %%eax), %%mm6\n"
-                               "movq 56(%0, %%eax), %%mm7\n"
-                               MOVNTQ" %%mm0, (%1, %%eax)\n"
-                               MOVNTQ" %%mm1, 8(%1, %%eax)\n"
-                               MOVNTQ" %%mm2, 16(%1, %%eax)\n"
-                               MOVNTQ" %%mm3, 24(%1, %%eax)\n"
-                               MOVNTQ" %%mm4, 32(%1, %%eax)\n"
-                               MOVNTQ" %%mm5, 40(%1, %%eax)\n"
-                               MOVNTQ" %%mm6, 48(%1, %%eax)\n"
-                               MOVNTQ" %%mm7, 56(%1, %%eax)\n"
-                               "addl $64, %%eax                \n\t"
-                               "cmpl %3, %%eax         \n\t"
+                               "movq (%0, %%"REG_a"), %%mm0\n"
+                               "movq 8(%0, %%"REG_a"), %%mm1\n"
+                               "movq 16(%0, %%"REG_a"), %%mm2\n"
+                               "movq 24(%0, %%"REG_a"), %%mm3\n"
+                               "movq 32(%0, %%"REG_a"), %%mm4\n"
+                               "movq 40(%0, %%"REG_a"), %%mm5\n"
+                               "movq 48(%0, %%"REG_a"), %%mm6\n"
+                               "movq 56(%0, %%"REG_a"), %%mm7\n"
+                               MOVNTQ" %%mm0, (%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm1, 8(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm2, 16(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm3, 24(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm4, 32(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm5, 40(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm6, 48(%1, %%"REG_a")\n"
+                               MOVNTQ" %%mm7, 56(%1, %%"REG_a")\n"
+                               "add $64, %%"REG_a"             \n\t"
+                               "cmp %3, %%"REG_a"              \n\t"
                                "jb 2b                          \n\t"
 
 #if CONFUSION_FACTOR > 0
        // a few percent speedup on out of order executing CPUs
-                       "movl %5, %%eax         \n\t"
+                       "mov %5, %%"REG_a"              \n\t"
                                "2:                     \n\t"
                                "movl (%0), %%ebx       \n\t"
                                "movl (%0), %%ebx       \n\t"
                                "movl (%0), %%ebx       \n\t"
                                "movl (%0), %%ebx       \n\t"
-                               "decl %%eax             \n\t"
+                               "dec %%"REG_a"          \n\t"
                                " jnz 2b                \n\t"
 #endif
 
-                       "xorl %%eax, %%eax      \n\t"
-                       "addl %3, %0            \n\t"
-                       "addl %3, %1            \n\t"
-                       "subl %4, %2            \n\t"
-                       "cmpl %4, %2            \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
+                       "add %3, %0             \n\t"
+                       "add %3, %1             \n\t"
+                       "sub %4, %2             \n\t"
+                       "cmp %4, %2             \n\t"
                        " jae 1b                \n\t"
                                : "+r" (from), "+r" (to), "+r" (i)
-                               : "r" (BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" (CONFUSION_FACTOR)
-                               : "%eax", "%ebx"
+                               : "r" ((long)BLOCK_SIZE), "i" (BLOCK_SIZE/64), "i" ((long)CONFUSION_FACTOR)
+                               : "%"REG_a, "%ebx"
                );
 
        for(; i>0; i--)
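
fast_memcpy's "pure assembly" path keeps its two-phase structure: a read loop that touches one 32-bit word per cache line to pull a whole BLOCK_SIZE chunk into cache, then a movq/MOVNTQ loop that streams it back out. Only the address and index registers move to REG_a (the dummy loads can stay in %ebx at 32 bits), and BLOCK_SIZE is cast to long because it is compared against a 64-bit register. The strategy in rough, plain C (sketch only; no non-temporal stores, and BLOCK stands in for BLOCK_SIZE):

/* rough C outline of the block-prefetch copy in RENAME(fast_memcpy) */
#include <stdint.h>
#include <string.h>

#define BLOCK 4096                       /* stands in for BLOCK_SIZE */

static void block_copy_sketch(uint8_t *to, const uint8_t *from, size_t len)
{
    volatile uint8_t sink;
    while (len >= BLOCK) {
        long a;
        /* pass 1: touch each cache line so the whole block becomes resident */
        for (a = 0; a < BLOCK; a += 32)
            sink = from[a];
        /* pass 2: copy the now-cached block; the real code uses MOVNTQ here
         * so the destination bypasses the cache on the way out */
        memcpy(to, from, BLOCK);
        to += BLOCK; from += BLOCK; len -= BLOCK;
    }
    memcpy(to, from, len);
    (void)sink;
}
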
diff --git a/libvo/osd.c b/libvo/osd.c
index 742174e..3c616dc 100644 (file)
--- a/libvo/osd.c
+++ b/libvo/osd.c
@@ -14,7 +14,7 @@
 
 extern int verbose; // defined in mplayer.c
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #define CAN_COMPILE_X86_ASM
 #endif
 
@@ -48,18 +48,18 @@ static const unsigned long long mask24hl  __attribute__((aligned(8))) = 0x0000FF
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#undef ARCH_X86
+
+#ifndef CAN_COMPILE_X86_ASM
 
 #ifdef COMPILE_C
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#undef ARCH_X86
 #define RENAME(a) a ## _C
 #include "osd_template.c"
 #endif
 
-#ifdef CAN_COMPILE_X86_ASM
+#else
 
 //X86 noMMX versions
 #ifdef COMPILE_C
@@ -67,7 +67,6 @@ static const unsigned long long mask24hl  __attribute__((aligned(8))) = 0x0000FF
 #undef HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _X86
 #include "osd_template.c"
 #endif
@@ -78,7 +77,6 @@ static const unsigned long long mask24hl  __attribute__((aligned(8))) = 0x0000FF
 #define HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "osd_template.c"
 #endif
@@ -89,7 +87,6 @@ static const unsigned long long mask24hl  __attribute__((aligned(8))) = 0x0000FF
 #define HAVE_MMX
 #define HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "osd_template.c"
 #endif
@@ -100,7 +97,6 @@ static const unsigned long long mask24hl  __attribute__((aligned(8))) = 0x0000FF
 #define HAVE_MMX
 #undef HAVE_MMX2
 #define HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _3DNow
 #include "osd_template.c"
 #endif
@@ -129,7 +125,7 @@ void vo_draw_alpha_yv12(int w,int h, unsigned char* src, unsigned char *srca, in
                vo_draw_alpha_yv12_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
                vo_draw_alpha_yv12_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                vo_draw_alpha_yv12_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
                vo_draw_alpha_yv12_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -159,7 +155,7 @@ void vo_draw_alpha_yuy2(int w,int h, unsigned char* src, unsigned char *srca, in
                vo_draw_alpha_yuy2_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
                vo_draw_alpha_yuy2_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                vo_draw_alpha_yuy2_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
                vo_draw_alpha_yuy2_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -189,7 +185,7 @@ void vo_draw_alpha_uyvy(int w,int h, unsigned char* src, unsigned char *srca, in
                vo_draw_alpha_uyvy_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
                vo_draw_alpha_uyvy_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                vo_draw_alpha_uyvy_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
                vo_draw_alpha_uyvy_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -219,7 +215,7 @@ void vo_draw_alpha_rgb24(int w,int h, unsigned char* src, unsigned char *srca, i
                vo_draw_alpha_rgb24_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
                vo_draw_alpha_rgb24_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                vo_draw_alpha_rgb24_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
                vo_draw_alpha_rgb24_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -249,7 +245,7 @@ void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, i
                vo_draw_alpha_rgb32_3DNow(w, h, src, srca, srcstride, dstbase, dststride);
 #elif defined (HAVE_MMX)
                vo_draw_alpha_rgb32_MMX(w, h, src, srca, srcstride, dstbase, dststride);
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                vo_draw_alpha_rgb32_X86(w, h, src, srca, srcstride, dstbase, dststride);
 #else
                vo_draw_alpha_rgb32_C(w, h, src, srca, srcstride, dstbase, dststride);
@@ -294,7 +290,7 @@ void vo_draw_alpha_init(){
                        mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit 3DNow) Optimized OnScreenDisplay\n");
 #elif defined (HAVE_MMX)
                        mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX Optimized OnScreenDisplay\n");
-#elif defined (ARCH_X86)
+#elif defined(ARCH_X86) || defined(ARCH_X86_64)
                        mp_msg(MSGT_OSD,MSGL_INFO,"Using X86 Optimized OnScreenDisplay\n");
 #else
                        mp_msg(MSGT_OSD,MSGL_INFO,"Using Unoptimized OnScreenDisplay\n");
diff --git a/libvo/osd_template.c b/libvo/osd_template.c
index 5c8c009..e2ada2c 100644 (file)
--- a/libvo/osd_template.c
+++ b/libvo/osd_template.c
@@ -189,7 +189,7 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src,
     for(y=0;y<h;y++){
         register unsigned char *dst = dstbase;
         register int x;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX
     asm volatile(
        PREFETCHW" %0\n\t"
@@ -253,7 +253,7 @@ static inline void RENAME(vo_draw_alpha_rgb24)(int w,int h, unsigned char* src,
                "addl %2, %%eax\n\t"
                "movb %%ah, 2(%0)\n\t"
                :
-               :"r" (dst),
+               :"D" (dst),
                 "r" ((unsigned)srca[x]),
                 "r" (((unsigned)src[x])<<8)
                :"%eax", "%ecx"
@@ -293,7 +293,7 @@ static inline void RENAME(vo_draw_alpha_rgb32)(int w,int h, unsigned char* src,
 #endif
     for(y=0;y<h;y++){
         register int x;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX
 #ifdef HAVE_3DNOW
     asm volatile(
diff --git a/postproc/rgb2rgb.c b/postproc/rgb2rgb.c
index e2d530f..45770bd 100644 (file)
--- a/postproc/rgb2rgb.c
+++ b/postproc/rgb2rgb.c
@@ -11,6 +11,7 @@
 #include "../config.h"
 #include "rgb2rgb.h"
 #include "swscale.h"
+#include "../cpudetect.h"
 #include "../mangle.h"
 #include "../bswap.h"
 #include "../libvo/fastmemcpy.h"
@@ -68,7 +69,7 @@ void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *sr
                        int srcStride1, int srcStride2,
                        int srcStride3, int dstStride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static const uint64_t mmx_null  __attribute__((aligned(8))) = 0x0000000000000000ULL;
 static const uint64_t mmx_one   __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const uint64_t mask32b  attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
@@ -152,7 +153,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _C
 #include "rgb2rgb_template.c"
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 //MMX versions
 #undef RENAME
@@ -181,7 +182,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 #define RENAME(a) a ## _3DNOW
 #include "rgb2rgb_template.c"
 
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 /*
  rgb15->rgb16 Original by Strepto/Astral
@@ -191,7 +192,7 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
 */
 
 void sws_rgb2rgb_init(int flags){
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        if(flags & SWS_CPU_CAPS_MMX2){
                rgb15to16= rgb15to16_MMX2;
                rgb15to24= rgb15to24_MMX2;
diff --git a/postproc/rgb2rgb_template.c b/postproc/rgb2rgb_template.c
index d3ccb95..8993178 100644 (file)
--- a/postproc/rgb2rgb_template.c
+++ b/postproc/rgb2rgb_template.c
@@ -349,9 +349,9 @@ static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, unsigned
                "pslld $11, %%mm3               \n\t"
                "por %%mm3, %%mm0               \n\t"
                MOVNTQ" %%mm0, (%0)             \n\t"
-               "addl $16, %1                   \n\t"
-               "addl $8, %0                    \n\t"
-               "cmpl %2, %1                    \n\t"
+               "add $16, %1                    \n\t"
+               "add $8, %0                     \n\t"
+               "cmp %2, %1                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (d), "+r"(s)
                : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
@@ -509,9 +509,9 @@ static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, unsigned
                "pslld $10, %%mm3               \n\t"
                "por %%mm3, %%mm0               \n\t"
                MOVNTQ" %%mm0, (%0)             \n\t"
-               "addl $16, %1                   \n\t"
-               "addl $8, %0                    \n\t"
-               "cmpl %2, %1                    \n\t"
+               "add $16, %1                    \n\t"
+               "add $8, %0                     \n\t"
+               "cmp %2, %1                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (d), "+r"(s)
                : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
@@ -1345,11 +1345,11 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, unsign
 #ifdef HAVE_MMX
 /* TODO: unroll this loop */
        asm volatile (
-               "xorl %%eax, %%eax              \n\t"
+               "xor %%"REG_a", %%"REG_a"       \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               PREFETCH" 32(%0, %%eax)         \n\t"
-               "movq (%0, %%eax), %%mm0        \n\t"
+               PREFETCH" 32(%0, %%"REG_a")     \n\t"
+               "movq (%0, %%"REG_a"), %%mm0    \n\t"
                "movq %%mm0, %%mm1              \n\t"
                "movq %%mm0, %%mm2              \n\t"
                "pslld $16, %%mm0               \n\t"
@@ -1359,12 +1359,12 @@ static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, unsign
                "pand "MANGLE(mask32b)", %%mm1  \n\t"
                "por %%mm0, %%mm2               \n\t"
                "por %%mm1, %%mm2               \n\t"
-               MOVNTQ" %%mm2, (%1, %%eax)      \n\t"
-               "addl $8, %%eax                 \n\t"
-               "cmpl %2, %%eax                 \n\t"
+               MOVNTQ" %%mm2, (%1, %%"REG_a")  \n\t"
+               "add $8, %%"REG_a"              \n\t"
+               "cmp %2, %%"REG_a"              \n\t"
                " jb 1b                         \n\t"
-               :: "r" (src), "r"(dst), "r" (src_size-7)
-               : "%eax"
+               :: "r" (src), "r"(dst), "r" ((long)src_size-7)
+               : "%"REG_a
        );
 
        __asm __volatile(SFENCE:::"memory");
@@ -1391,43 +1391,43 @@ static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, unsign
 {
        unsigned i;
 #ifdef HAVE_MMX
-       int mmx_size= 23 - src_size;
+       long mmx_size= 23 - src_size;
        asm volatile (
                "movq "MANGLE(mask24r)", %%mm5  \n\t"
                "movq "MANGLE(mask24g)", %%mm6  \n\t"
                "movq "MANGLE(mask24b)", %%mm7  \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               PREFETCH" 32(%1, %%eax)         \n\t"
-               "movq   (%1, %%eax), %%mm0      \n\t" // BGR BGR BG
-               "movq   (%1, %%eax), %%mm1      \n\t" // BGR BGR BG
-               "movq  2(%1, %%eax), %%mm2      \n\t" // R BGR BGR B
+               PREFETCH" 32(%1, %%"REG_a")     \n\t"
+               "movq   (%1, %%"REG_a"), %%mm0  \n\t" // BGR BGR BG
+               "movq   (%1, %%"REG_a"), %%mm1  \n\t" // BGR BGR BG
+               "movq  2(%1, %%"REG_a"), %%mm2  \n\t" // R BGR BGR B
                "psllq $16, %%mm0               \n\t" // 00 BGR BGR
                "pand %%mm5, %%mm0              \n\t"
                "pand %%mm6, %%mm1              \n\t"
                "pand %%mm7, %%mm2              \n\t"
                "por %%mm0, %%mm1               \n\t"
                "por %%mm2, %%mm1               \n\t"                
-               "movq  6(%1, %%eax), %%mm0      \n\t" // BGR BGR BG
-               MOVNTQ" %%mm1,   (%2, %%eax)    \n\t" // RGB RGB RG
-               "movq  8(%1, %%eax), %%mm1      \n\t" // R BGR BGR B
-               "movq 10(%1, %%eax), %%mm2      \n\t" // GR BGR BGR
+               "movq  6(%1, %%"REG_a"), %%mm0  \n\t" // BGR BGR BG
+               MOVNTQ" %%mm1,   (%2, %%"REG_a")\n\t" // RGB RGB RG
+               "movq  8(%1, %%"REG_a"), %%mm1  \n\t" // R BGR BGR B
+               "movq 10(%1, %%"REG_a"), %%mm2  \n\t" // GR BGR BGR
                "pand %%mm7, %%mm0              \n\t"
                "pand %%mm5, %%mm1              \n\t"
                "pand %%mm6, %%mm2              \n\t"
                "por %%mm0, %%mm1               \n\t"
                "por %%mm2, %%mm1               \n\t"                
-               "movq 14(%1, %%eax), %%mm0      \n\t" // R BGR BGR B
-               MOVNTQ" %%mm1,  8(%2, %%eax)    \n\t" // B RGB RGB R
-               "movq 16(%1, %%eax), %%mm1      \n\t" // GR BGR BGR
-               "movq 18(%1, %%eax), %%mm2      \n\t" // BGR BGR BG
+               "movq 14(%1, %%"REG_a"), %%mm0  \n\t" // R BGR BGR B
+               MOVNTQ" %%mm1,  8(%2, %%"REG_a")\n\t" // B RGB RGB R
+               "movq 16(%1, %%"REG_a"), %%mm1  \n\t" // GR BGR BGR
+               "movq 18(%1, %%"REG_a"), %%mm2  \n\t" // BGR BGR BG
                "pand %%mm6, %%mm0              \n\t"
                "pand %%mm7, %%mm1              \n\t"
                "pand %%mm5, %%mm2              \n\t"
                "por %%mm0, %%mm1               \n\t"
                "por %%mm2, %%mm1               \n\t"                
-               MOVNTQ" %%mm1, 16(%2, %%eax)    \n\t"
-               "addl $24, %%eax                \n\t"
+               MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
+               "add $24, %%"REG_a"             \n\t"
                " js 1b                         \n\t"
                : "+a" (mmx_size)
                : "r" (src-mmx_size), "r"(dst-mmx_size)
@@ -1465,20 +1465,20 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
 #ifdef HAVE_MMX
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
                asm volatile(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 32(%1, %%eax, 2)      \n\t"
-                       PREFETCH" 32(%2, %%eax)         \n\t"
-                       PREFETCH" 32(%3, %%eax)         \n\t"
-                       "movq (%2, %%eax), %%mm0        \n\t" // U(0)
+                       PREFETCH" 32(%1, %%"REG_a", 2)  \n\t"
+                       PREFETCH" 32(%2, %%"REG_a")     \n\t"
+                       PREFETCH" 32(%3, %%"REG_a")     \n\t"
+                       "movq (%2, %%"REG_a"), %%mm0    \n\t" // U(0)
                        "movq %%mm0, %%mm2              \n\t" // U(0)
-                       "movq (%3, %%eax), %%mm1        \n\t" // V(0)
+                       "movq (%3, %%"REG_a"), %%mm1    \n\t" // V(0)
                        "punpcklbw %%mm1, %%mm0         \n\t" // UVUV UVUV(0)
                        "punpckhbw %%mm1, %%mm2         \n\t" // UVUV UVUV(8)
 
-                       "movq (%1, %%eax,2), %%mm3      \n\t" // Y(0)
-                       "movq 8(%1, %%eax,2), %%mm5     \n\t" // Y(8)
+                       "movq (%1, %%"REG_a",2), %%mm3  \n\t" // Y(0)
+                       "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
                        "movq %%mm3, %%mm4              \n\t" // Y(0)
                        "movq %%mm5, %%mm6              \n\t" // Y(8)
                        "punpcklbw %%mm0, %%mm3         \n\t" // YUYV YUYV(0)
@@ -1486,16 +1486,16 @@ static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *u
                        "punpcklbw %%mm2, %%mm5         \n\t" // YUYV YUYV(8)
                        "punpckhbw %%mm2, %%mm6         \n\t" // YUYV YUYV(12)
 
-                       MOVNTQ" %%mm3, (%0, %%eax, 4)   \n\t"
-                       MOVNTQ" %%mm4, 8(%0, %%eax, 4)  \n\t"
-                       MOVNTQ" %%mm5, 16(%0, %%eax, 4) \n\t"
-                       MOVNTQ" %%mm6, 24(%0, %%eax, 4) \n\t"
+                       MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
 
-                       "addl $8, %%eax                 \n\t"
-                       "cmpl %4, %%eax                 \n\t"
+                       "add $8, %%"REG_a"              \n\t"
+                       "cmp %4, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
-                       ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
-                       : "%eax"
+                       ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+                       : "%"REG_a
                );
 #else
 
@@ -1618,20 +1618,20 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
 #ifdef HAVE_MMX
 //FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
                asm volatile(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 32(%1, %%eax, 2)      \n\t"
-                       PREFETCH" 32(%2, %%eax)         \n\t"
-                       PREFETCH" 32(%3, %%eax)         \n\t"
-                       "movq (%2, %%eax), %%mm0        \n\t" // U(0)
+                       PREFETCH" 32(%1, %%"REG_a", 2)  \n\t"
+                       PREFETCH" 32(%2, %%"REG_a")     \n\t"
+                       PREFETCH" 32(%3, %%"REG_a")     \n\t"
+                       "movq (%2, %%"REG_a"), %%mm0    \n\t" // U(0)
                        "movq %%mm0, %%mm2              \n\t" // U(0)
-                       "movq (%3, %%eax), %%mm1        \n\t" // V(0)
+                       "movq (%3, %%"REG_a"), %%mm1    \n\t" // V(0)
                        "punpcklbw %%mm1, %%mm0         \n\t" // UVUV UVUV(0)
                        "punpckhbw %%mm1, %%mm2         \n\t" // UVUV UVUV(8)
 
-                       "movq (%1, %%eax,2), %%mm3      \n\t" // Y(0)
-                       "movq 8(%1, %%eax,2), %%mm5     \n\t" // Y(8)
+                       "movq (%1, %%"REG_a",2), %%mm3  \n\t" // Y(0)
+                       "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
                        "movq %%mm0, %%mm4              \n\t" // Y(0)
                        "movq %%mm2, %%mm6              \n\t" // Y(8)
                        "punpcklbw %%mm3, %%mm0         \n\t" // YUYV YUYV(0)
@@ -1639,16 +1639,16 @@ static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *u
                        "punpcklbw %%mm5, %%mm2         \n\t" // YUYV YUYV(8)
                        "punpckhbw %%mm5, %%mm6         \n\t" // YUYV YUYV(12)
 
-                       MOVNTQ" %%mm0, (%0, %%eax, 4)   \n\t"
-                       MOVNTQ" %%mm4, 8(%0, %%eax, 4)  \n\t"
-                       MOVNTQ" %%mm2, 16(%0, %%eax, 4) \n\t"
-                       MOVNTQ" %%mm6, 24(%0, %%eax, 4) \n\t"
+                       MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
+                       MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
 
-                       "addl $8, %%eax                 \n\t"
-                       "cmpl %4, %%eax                 \n\t"
+                       "add $8, %%"REG_a"              \n\t"
+                       "cmp %4, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
-                       ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
-                       : "%eax"
+                       ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+                       : "%"REG_a
                );
 #else
 //FIXME adapt the alpha asm code from yv12->yuy2
@@ -1740,14 +1740,14 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
        {
 #ifdef HAVE_MMX
                asm volatile(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        "pcmpeqw %%mm7, %%mm7           \n\t"
                        "psrlw $8, %%mm7                \n\t" // FF,00,FF,00...
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 64(%0, %%eax, 4)      \n\t"
-                       "movq (%0, %%eax, 4), %%mm0     \n\t" // YUYV YUYV(0)
-                       "movq 8(%0, %%eax, 4), %%mm1    \n\t" // YUYV YUYV(4)
+                       PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
+                       "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+                       "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
                        "movq %%mm0, %%mm2              \n\t" // YUYV YUYV(0)
                        "movq %%mm1, %%mm3              \n\t" // YUYV YUYV(4)
                        "psrlw $8, %%mm0                \n\t" // U0V0 U0V0(0)
@@ -1757,10 +1757,10 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
                        "packuswb %%mm1, %%mm0          \n\t" // UVUV UVUV(0)
                        "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(0)
 
-                       MOVNTQ" %%mm2, (%1, %%eax, 2)   \n\t"
+                       MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
 
-                       "movq 16(%0, %%eax, 4), %%mm1   \n\t" // YUYV YUYV(8)
-                       "movq 24(%0, %%eax, 4), %%mm2   \n\t" // YUYV YUYV(12)
+                       "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
+                       "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
                        "movq %%mm1, %%mm3              \n\t" // YUYV YUYV(8)
                        "movq %%mm2, %%mm4              \n\t" // YUYV YUYV(12)
                        "psrlw $8, %%mm1                \n\t" // U0V0 U0V0(8)
@@ -1770,7 +1770,7 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
                        "packuswb %%mm2, %%mm1          \n\t" // UVUV UVUV(8)
                        "packuswb %%mm4, %%mm3          \n\t" // YYYY YYYY(8)
 
-                       MOVNTQ" %%mm3, 8(%1, %%eax, 2)  \n\t"
+                       MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
 
                        "movq %%mm0, %%mm2              \n\t" // UVUV UVUV(0)
                        "movq %%mm1, %%mm3              \n\t" // UVUV UVUV(8)
@@ -1781,28 +1781,28 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
                        "packuswb %%mm1, %%mm0          \n\t" // VVVV VVVV(0)
                        "packuswb %%mm3, %%mm2          \n\t" // UUUU UUUU(0)
 
-                       MOVNTQ" %%mm0, (%3, %%eax)      \n\t"
-                       MOVNTQ" %%mm2, (%2, %%eax)      \n\t"
+                       MOVNTQ" %%mm0, (%3, %%"REG_a")  \n\t"
+                       MOVNTQ" %%mm2, (%2, %%"REG_a")  \n\t"
 
-                       "addl $8, %%eax                 \n\t"
-                       "cmpl %4, %%eax                 \n\t"
+                       "add $8, %%"REG_a"              \n\t"
+                       "cmp %4, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
-                       ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
-                       : "memory", "%eax"
+                       ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+                       : "memory", "%"REG_a
                );
 
                ydst += lumStride;
                src  += srcStride;
 
                asm volatile(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 64(%0, %%eax, 4)      \n\t"
-                       "movq (%0, %%eax, 4), %%mm0     \n\t" // YUYV YUYV(0)
-                       "movq 8(%0, %%eax, 4), %%mm1    \n\t" // YUYV YUYV(4)
-                       "movq 16(%0, %%eax, 4), %%mm2   \n\t" // YUYV YUYV(8)
-                       "movq 24(%0, %%eax, 4), %%mm3   \n\t" // YUYV YUYV(12)
+                       PREFETCH" 64(%0, %%"REG_a", 4)  \n\t"
+                       "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+                       "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
+                       "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
+                       "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
                        "pand %%mm7, %%mm0              \n\t" // Y0Y0 Y0Y0(0)
                        "pand %%mm7, %%mm1              \n\t" // Y0Y0 Y0Y0(4)
                        "pand %%mm7, %%mm2              \n\t" // Y0Y0 Y0Y0(8)
@@ -1810,15 +1810,15 @@ static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t
                        "packuswb %%mm1, %%mm0          \n\t" // YYYY YYYY(0)
                        "packuswb %%mm3, %%mm2          \n\t" // YYYY YYYY(8)
 
-                       MOVNTQ" %%mm0, (%1, %%eax, 2)   \n\t"
-                       MOVNTQ" %%mm2, 8(%1, %%eax, 2)  \n\t"
+                       MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
 
-                       "addl $8, %%eax                 \n\t"
-                       "cmpl %4, %%eax                 \n\t"
+                       "add $8, %%"REG_a"              \n\t"
+                       "cmp %4, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
 
-                       ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
-                       : "memory", "%eax"
+                       ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+                       : "memory", "%"REG_a
                );
 #else
                unsigned i;
@@ -1877,16 +1877,16 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWid
 
        for(y=1; y<srcHeight; y++){
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-               const int mmxSize= srcWidth&~15;
+               const long mmxSize= srcWidth&~15;
                asm volatile(
-                       "movl %4, %%eax                 \n\t"
+                       "mov %4, %%"REG_a"              \n\t"
                        "1:                             \n\t"
-                       "movq (%0, %%eax), %%mm0        \n\t"
-                       "movq (%1, %%eax), %%mm1        \n\t"
-                       "movq 1(%0, %%eax), %%mm2       \n\t"
-                       "movq 1(%1, %%eax), %%mm3       \n\t"
-                       "movq -1(%0, %%eax), %%mm4      \n\t"
-                       "movq -1(%1, %%eax), %%mm5      \n\t"
+                       "movq (%0, %%"REG_a"), %%mm0    \n\t"
+                       "movq (%1, %%"REG_a"), %%mm1    \n\t"
+                       "movq 1(%0, %%"REG_a"), %%mm2   \n\t"
+                       "movq 1(%1, %%"REG_a"), %%mm3   \n\t"
+                       "movq -1(%0, %%"REG_a"), %%mm4  \n\t"
+                       "movq -1(%1, %%"REG_a"), %%mm5  \n\t"
                        PAVGB" %%mm0, %%mm5             \n\t"
                        PAVGB" %%mm0, %%mm3             \n\t"
                        PAVGB" %%mm0, %%mm5             \n\t"
@@ -1902,22 +1902,22 @@ static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWid
                        "punpcklbw %%mm2, %%mm4         \n\t"
                        "punpckhbw %%mm2, %%mm6         \n\t"
 #if 1
-                       MOVNTQ" %%mm5, (%2, %%eax, 2)   \n\t"
-                       MOVNTQ" %%mm7, 8(%2, %%eax, 2)  \n\t"
-                       MOVNTQ" %%mm4, (%3, %%eax, 2)   \n\t"
-                       MOVNTQ" %%mm6, 8(%3, %%eax, 2)  \n\t"
+                       MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
 #else
-                       "movq %%mm5, (%2, %%eax, 2)     \n\t"
-                       "movq %%mm7, 8(%2, %%eax, 2)    \n\t"
-                       "movq %%mm4, (%3, %%eax, 2)     \n\t"
-                       "movq %%mm6, 8(%3, %%eax, 2)    \n\t"
+                       "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
+                       "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+                       "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
+                       "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
 #endif
-                       "addl $8, %%eax                 \n\t"
+                       "add $8, %%"REG_a"              \n\t"
                        " js 1b                         \n\t"
                        :: "r" (src + mmxSize  ), "r" (src + srcStride + mmxSize  ),
                           "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
                           "g" (-mmxSize)
-                       : "%eax"
+                       : "%"REG_a
 
                );
 #else
@@ -2107,20 +2107,20 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                for(i=0; i<2; i++)
                {
                        asm volatile(
-                               "movl %2, %%eax                 \n\t"
+                               "mov %2, %%"REG_a"              \n\t"
                                "movq "MANGLE(bgr2YCoeff)", %%mm6               \n\t"
                                "movq "MANGLE(w1111)", %%mm5            \n\t"
                                "pxor %%mm7, %%mm7              \n\t"
-                               "leal (%%eax, %%eax, 2), %%ebx  \n\t"
+                               "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
                                ".balign 16                     \n\t"
                                "1:                             \n\t"
-                               PREFETCH" 64(%0, %%ebx)         \n\t"
-                               "movd (%0, %%ebx), %%mm0        \n\t"
-                               "movd 3(%0, %%ebx), %%mm1       \n\t"
+                               PREFETCH" 64(%0, %%"REG_b")     \n\t"
+                               "movd (%0, %%"REG_b"), %%mm0    \n\t"
+                               "movd 3(%0, %%"REG_b"), %%mm1   \n\t"
                                "punpcklbw %%mm7, %%mm0         \n\t"
                                "punpcklbw %%mm7, %%mm1         \n\t"
-                               "movd 6(%0, %%ebx), %%mm2       \n\t"
-                               "movd 9(%0, %%ebx), %%mm3       \n\t"
+                               "movd 6(%0, %%"REG_b"), %%mm2   \n\t"
+                               "movd 9(%0, %%"REG_b"), %%mm3   \n\t"
                                "punpcklbw %%mm7, %%mm2         \n\t"
                                "punpcklbw %%mm7, %%mm3         \n\t"
                                "pmaddwd %%mm6, %%mm0           \n\t"
@@ -2140,12 +2140,12 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                                "packssdw %%mm2, %%mm0          \n\t"
                                "psraw $7, %%mm0                \n\t"
 
-                               "movd 12(%0, %%ebx), %%mm4      \n\t"
-                               "movd 15(%0, %%ebx), %%mm1      \n\t"
+                               "movd 12(%0, %%"REG_b"), %%mm4  \n\t"
+                               "movd 15(%0, %%"REG_b"), %%mm1  \n\t"
                                "punpcklbw %%mm7, %%mm4         \n\t"
                                "punpcklbw %%mm7, %%mm1         \n\t"
-                               "movd 18(%0, %%ebx), %%mm2      \n\t"
-                               "movd 21(%0, %%ebx), %%mm3      \n\t"
+                               "movd 18(%0, %%"REG_b"), %%mm2  \n\t"
+                               "movd 21(%0, %%"REG_b"), %%mm3  \n\t"
                                "punpcklbw %%mm7, %%mm2         \n\t"
                                "punpcklbw %%mm7, %%mm3         \n\t"
                                "pmaddwd %%mm6, %%mm4           \n\t"
@@ -2162,39 +2162,39 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                                "packssdw %%mm3, %%mm2          \n\t"
                                "pmaddwd %%mm5, %%mm4           \n\t"
                                "pmaddwd %%mm5, %%mm2           \n\t"
-                               "addl $24, %%ebx                \n\t"
+                               "add $24, %%"REG_b"             \n\t"
                                "packssdw %%mm2, %%mm4          \n\t"
                                "psraw $7, %%mm4                \n\t"
 
                                "packuswb %%mm4, %%mm0          \n\t"
                                "paddusb "MANGLE(bgr2YOffset)", %%mm0   \n\t"
 
-                               MOVNTQ" %%mm0, (%1, %%eax)      \n\t"
-                               "addl $8, %%eax                 \n\t"
+                               MOVNTQ" %%mm0, (%1, %%"REG_a")  \n\t"
+                               "add $8, %%"REG_a"              \n\t"
                                " js 1b                         \n\t"
-                               : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
-                               : "%eax", "%ebx"
+                               : : "r" (src+width*3), "r" (ydst+width), "g" ((long)-width)
+                               : "%"REG_a, "%"REG_b
                        );
                        ydst += lumStride;
                        src  += srcStride;
                }
                src -= srcStride*2;
                asm volatile(
-                       "movl %4, %%eax                 \n\t"
+                       "mov %4, %%"REG_a"              \n\t"
                        "movq "MANGLE(w1111)", %%mm5            \n\t"
                        "movq "MANGLE(bgr2UCoeff)", %%mm6               \n\t"
                        "pxor %%mm7, %%mm7              \n\t"
-                       "leal (%%eax, %%eax, 2), %%ebx  \n\t"
-                       "addl %%ebx, %%ebx              \n\t"
+                       "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+                       "add %%"REG_b", %%"REG_b"       \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 64(%0, %%ebx)         \n\t"
-                       PREFETCH" 64(%1, %%ebx)         \n\t"
+                       PREFETCH" 64(%0, %%"REG_b")     \n\t"
+                       PREFETCH" 64(%1, %%"REG_b")     \n\t"
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-                       "movq (%0, %%ebx), %%mm0        \n\t"
-                       "movq (%1, %%ebx), %%mm1        \n\t"
-                       "movq 6(%0, %%ebx), %%mm2       \n\t"
-                       "movq 6(%1, %%ebx), %%mm3       \n\t"
+                       "movq (%0, %%"REG_b"), %%mm0    \n\t"
+                       "movq (%1, %%"REG_b"), %%mm1    \n\t"
+                       "movq 6(%0, %%"REG_b"), %%mm2   \n\t"
+                       "movq 6(%1, %%"REG_b"), %%mm3   \n\t"
                        PAVGB" %%mm1, %%mm0             \n\t"
                        PAVGB" %%mm3, %%mm2             \n\t"
                        "movq %%mm0, %%mm1              \n\t"
@@ -2206,10 +2206,10 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "punpcklbw %%mm7, %%mm0         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
 #else
-                       "movd (%0, %%ebx), %%mm0        \n\t"
-                       "movd (%1, %%ebx), %%mm1        \n\t"
-                       "movd 3(%0, %%ebx), %%mm2       \n\t"
-                       "movd 3(%1, %%ebx), %%mm3       \n\t"
+                       "movd (%0, %%"REG_b"), %%mm0    \n\t"
+                       "movd (%1, %%"REG_b"), %%mm1    \n\t"
+                       "movd 3(%0, %%"REG_b"), %%mm2   \n\t"
+                       "movd 3(%1, %%"REG_b"), %%mm3   \n\t"
                        "punpcklbw %%mm7, %%mm0         \n\t"
                        "punpcklbw %%mm7, %%mm1         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
@@ -2217,10 +2217,10 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "paddw %%mm1, %%mm0             \n\t"
                        "paddw %%mm3, %%mm2             \n\t"
                        "paddw %%mm2, %%mm0             \n\t"
-                       "movd 6(%0, %%ebx), %%mm4       \n\t"
-                       "movd 6(%1, %%ebx), %%mm1       \n\t"
-                       "movd 9(%0, %%ebx), %%mm2       \n\t"
-                       "movd 9(%1, %%ebx), %%mm3       \n\t"
+                       "movd 6(%0, %%"REG_b"), %%mm4   \n\t"
+                       "movd 6(%1, %%"REG_b"), %%mm1   \n\t"
+                       "movd 9(%0, %%"REG_b"), %%mm2   \n\t"
+                       "movd 9(%1, %%"REG_b"), %%mm3   \n\t"
                        "punpcklbw %%mm7, %%mm4         \n\t"
                        "punpcklbw %%mm7, %%mm1         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
@@ -2252,10 +2252,10 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "psraw $7, %%mm0                \n\t"
 
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-                       "movq 12(%0, %%ebx), %%mm4      \n\t"
-                       "movq 12(%1, %%ebx), %%mm1      \n\t"
-                       "movq 18(%0, %%ebx), %%mm2      \n\t"
-                       "movq 18(%1, %%ebx), %%mm3      \n\t"
+                       "movq 12(%0, %%"REG_b"), %%mm4  \n\t"
+                       "movq 12(%1, %%"REG_b"), %%mm1  \n\t"
+                       "movq 18(%0, %%"REG_b"), %%mm2  \n\t"
+                       "movq 18(%1, %%"REG_b"), %%mm3  \n\t"
                        PAVGB" %%mm1, %%mm4             \n\t"
                        PAVGB" %%mm3, %%mm2             \n\t"
                        "movq %%mm4, %%mm1              \n\t"
@@ -2267,10 +2267,10 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "punpcklbw %%mm7, %%mm4         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
 #else
-                       "movd 12(%0, %%ebx), %%mm4      \n\t"
-                       "movd 12(%1, %%ebx), %%mm1      \n\t"
-                       "movd 15(%0, %%ebx), %%mm2      \n\t"
-                       "movd 15(%1, %%ebx), %%mm3      \n\t"
+                       "movd 12(%0, %%"REG_b"), %%mm4  \n\t"
+                       "movd 12(%1, %%"REG_b"), %%mm1  \n\t"
+                       "movd 15(%0, %%"REG_b"), %%mm2  \n\t"
+                       "movd 15(%1, %%"REG_b"), %%mm3  \n\t"
                        "punpcklbw %%mm7, %%mm4         \n\t"
                        "punpcklbw %%mm7, %%mm1         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
@@ -2278,10 +2278,10 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "paddw %%mm1, %%mm4             \n\t"
                        "paddw %%mm3, %%mm2             \n\t"
                        "paddw %%mm2, %%mm4             \n\t"
-                       "movd 18(%0, %%ebx), %%mm5      \n\t"
-                       "movd 18(%1, %%ebx), %%mm1      \n\t"
-                       "movd 21(%0, %%ebx), %%mm2      \n\t"
-                       "movd 21(%1, %%ebx), %%mm3      \n\t"
+                       "movd 18(%0, %%"REG_b"), %%mm5  \n\t"
+                       "movd 18(%1, %%"REG_b"), %%mm1  \n\t"
+                       "movd 21(%0, %%"REG_b"), %%mm2  \n\t"
+                       "movd 21(%1, %%"REG_b"), %%mm3  \n\t"
                        "punpcklbw %%mm7, %%mm5         \n\t"
                        "punpcklbw %%mm7, %%mm1         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
@@ -2310,7 +2310,7 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "packssdw %%mm3, %%mm1          \n\t"
                        "pmaddwd %%mm5, %%mm4           \n\t"
                        "pmaddwd %%mm5, %%mm1           \n\t"
-                       "addl $24, %%ebx                \n\t"
+                       "add $24, %%"REG_b"             \n\t"
                        "packssdw %%mm1, %%mm4          \n\t" // V3 V2 U3 U2
                        "psraw $7, %%mm4                \n\t"
 
@@ -2319,14 +2319,13 @@ static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_
                        "punpckhdq %%mm4, %%mm1         \n\t"
                        "packsswb %%mm1, %%mm0          \n\t"
                        "paddb "MANGLE(bgr2UVOffset)", %%mm0    \n\t"
-
-                       "movd %%mm0, (%2, %%eax)        \n\t"
+                       "movd %%mm0, (%2, %%"REG_a")    \n\t"
                        "punpckhdq %%mm0, %%mm0         \n\t"
-                       "movd %%mm0, (%3, %%eax)        \n\t"
-                       "addl $4, %%eax                 \n\t"
+                       "movd %%mm0, (%3, %%"REG_a")    \n\t"
+                       "add $4, %%"REG_a"              \n\t"
                        " js 1b                         \n\t"
-                       : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
-                       : "%eax", "%ebx"
+                       : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" ((long)-chromWidth)
+                       : "%"REG_a, "%"REG_b
                );
 
                udst += chromStride;
@@ -2403,48 +2402,48 @@ void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
 #ifdef HAVE_MMX
 #ifdef HAVE_SSE2
                asm(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 64(%1, %%eax)         \n\t"
-                       PREFETCH" 64(%2, %%eax)         \n\t"
-                       "movdqa (%1, %%eax), %%xmm0     \n\t"
-                       "movdqa (%1, %%eax), %%xmm1     \n\t"
-                       "movdqa (%2, %%eax), %%xmm2     \n\t"
+                       PREFETCH" 64(%1, %%"REG_a")     \n\t"
+                       PREFETCH" 64(%2, %%"REG_a")     \n\t"
+                       "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
+                       "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
+                       "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
                        "punpcklbw %%xmm2, %%xmm0       \n\t"
                        "punpckhbw %%xmm2, %%xmm1       \n\t"
-                       "movntdq %%xmm0, (%0, %%eax, 2) \n\t"
-                       "movntdq %%xmm1, 16(%0, %%eax, 2)\n\t"
-                       "addl $16, %%eax                        \n\t"
-                       "cmpl %3, %%eax                 \n\t"
+                       "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
+                       "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
+                       "add $16, %%"REG_a"             \n\t"
+                       "cmp %3, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
-                       ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
-                       : "memory", "%eax"
+                       ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+                       : "memory", "%"REG_a""
                );
 #else
                asm(
-                       "xorl %%eax, %%eax              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t"
                        "1:                             \n\t"
-                       PREFETCH" 64(%1, %%eax)         \n\t"
-                       PREFETCH" 64(%2, %%eax)         \n\t"
-                       "movq (%1, %%eax), %%mm0        \n\t"
-                       "movq 8(%1, %%eax), %%mm2       \n\t"
+                       PREFETCH" 64(%1, %%"REG_a")     \n\t"
+                       PREFETCH" 64(%2, %%"REG_a")     \n\t"
+                       "movq (%1, %%"REG_a"), %%mm0    \n\t"
+                       "movq 8(%1, %%"REG_a"), %%mm2   \n\t"
                        "movq %%mm0, %%mm1              \n\t"
                        "movq %%mm2, %%mm3              \n\t"
-                       "movq (%2, %%eax), %%mm4        \n\t"
-                       "movq 8(%2, %%eax), %%mm5       \n\t"
+                       "movq (%2, %%"REG_a"), %%mm4    \n\t"
+                       "movq 8(%2, %%"REG_a"), %%mm5   \n\t"
                        "punpcklbw %%mm4, %%mm0         \n\t"
                        "punpckhbw %%mm4, %%mm1         \n\t"
                        "punpcklbw %%mm5, %%mm2         \n\t"
                        "punpckhbw %%mm5, %%mm3         \n\t"
-                       MOVNTQ" %%mm0, (%0, %%eax, 2)   \n\t"
-                       MOVNTQ" %%mm1, 8(%0, %%eax, 2)  \n\t"
-                       MOVNTQ" %%mm2, 16(%0, %%eax, 2) \n\t"
-                       MOVNTQ" %%mm3, 24(%0, %%eax, 2) \n\t"
-                       "addl $16, %%eax                        \n\t"
-                       "cmpl %3, %%eax                 \n\t"
+                       MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
+                       MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
+                       "add $16, %%"REG_a"             \n\t"
+                       "cmp %3, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
-                       ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
-                       : "memory", "%eax"
+                       ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+                       : "memory", "%"REG_a
                );
 #endif
                for(w= (width&(~15)); w < width; w++)
@@ -2582,7 +2581,7 @@ static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2
                        int srcStride1, int srcStride2,
                        int srcStride3, int dstStride)
 {
-    unsigned y,x,w,h;
+    unsigned long y,x,w,h;
     w=width/2; h=height;
     for(y=0;y<h;y++){
        const uint8_t* yp=src1+srcStride1*y;
diff --git a/postproc/swscale-example.c b/postproc/swscale-example.c
index 10c841d..ce245b0 100644 (file)
--- a/postproc/swscale-example.c
+++ b/postproc/swscale-example.c
@@ -104,7 +104,7 @@ static void doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcForma
        sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
        sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        asm volatile ("emms\n\t");
 #endif
             
@@ -199,14 +199,14 @@ int main(int argc, char **argv){
                        rgb_data[ x + y*4*W]= random();
                }
        }
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
 #else
        sws_rgb2rgb_init(0);
 #endif
        sws_scale(sws, rgb_src, rgb_stride, 0, H   , src, stride);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        asm volatile ("emms\n\t");
 #endif
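swscale-example.c only needs the widened guards: the emms that clears the shared MMX/x87 register state after MMX code, and the capability flags handed to sws_rgb2rgb_init(), apply to x86_64 as well, where MMX is part of the baseline instruction set. A small sketch of the reset as a helper; clear_mmx_state is an invented name, the file simply issues emms inline as shown above:

#if defined(ARCH_X86) || defined(ARCH_X86_64)
static inline void clear_mmx_state(void){ __asm__ volatile("emms"); }
#else
static inline void clear_mmx_state(void){ /* nothing to do without MMX */ }
#endif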
 
diff --git a/postproc/swscale.c b/postproc/swscale.c
index 4b7eec8..6e94197 100644 (file)
--- a/postproc/swscale.c
+++ b/postproc/swscale.c
@@ -145,7 +145,7 @@ write special BGR->BGR scaler
 #define MIN(a,b) ((a) > (b) ? (b) : (a))
 #define MAX(a,b) ((a) < (b) ? (b) : (a))
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static uint64_t attribute_used __attribute__((aligned(8))) bF8=       0xF8F8F8F8F8F8F8F8LL;
 static uint64_t attribute_used __attribute__((aligned(8))) bFC=       0xFCFCFCFCFCFCFCFCLL;
 static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
@@ -204,7 +204,7 @@ extern const uint8_t dither_8x8_32[8][8];
 extern const uint8_t dither_8x8_73[8][8];
 extern const uint8_t dither_8x8_220[8][8];
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 void in_asm_used_var_warning_killer()
 {
  volatile int i= bF8+bFC+w10+
@@ -679,7 +679,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif //HAVE_ALTIVEC
 #endif //ARCH_POWERPC
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -692,7 +692,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_3DNOW
 #endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 #undef HAVE_MMX
 #undef HAVE_MMX2
@@ -716,7 +716,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #endif
 #endif //ARCH_POWERPC
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 //X86 versions
 /*
@@ -758,7 +758,7 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
 #include "swscale_template.c"
 #endif
 
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
 
 // minor note: the HAVE_xyz is messed up after that line so don't use it
 
@@ -783,7 +783,7 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
        int minFilterSize;
        double *filter=NULL;
        double *filter2=NULL;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        if(flags & SWS_CPU_CAPS_MMX)
                asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
 #endif
@@ -1142,17 +1142,17 @@ static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *out
        free(filter);
 }
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
 {
        uint8_t *fragmentA;
-       int imm8OfPShufW1A;
-       int imm8OfPShufW2A;
-       int fragmentLengthA;
+       long imm8OfPShufW1A;
+       long imm8OfPShufW2A;
+       long fragmentLengthA;
        uint8_t *fragmentB;
-       int imm8OfPShufW1B;
-       int imm8OfPShufW2B;
-       int fragmentLengthB;
+       long imm8OfPShufW1B;
+       long imm8OfPShufW2B;
+       long fragmentLengthB;
        int fragmentPos;
 
        int xpos, i;
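The counters that receive code-fragment addresses and offsets below change from int to long for the same operand-size reason: once the leal/decl/subl suffixes are gone, the size of each operand comes from its register, and an instruction such as sub %0, %1 will not assemble if %0 is a 64-bit pointer output while %1 is a 32-bit int output. A tiny illustration of the mismatch; buf, p and d are invented names:

unsigned char buf[1];
unsigned char *p = buf;
long d;                            /* with int here, %0 below would be a 32-bit register */
__asm__("mov %1, %0     \n\t"      /* copy the full-width pointer ...                    */
        "sub %1, %0     \n\t"      /* ... and subtract it again, leaving 0 in d          */
        : "=&r" (d)
        : "r" (p));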
@@ -1165,9 +1165,9 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
                "jmp 9f                         \n\t"
        // Begin
                "0:                             \n\t"
-               "movq (%%edx, %%eax), %%mm3     \n\t" 
-               "movd (%%ecx, %%esi), %%mm0     \n\t" 
-               "movd 1(%%ecx, %%esi), %%mm1    \n\t"
+               "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t" 
+               "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t" 
+               "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "pshufw $0xFF, %%mm1, %%mm1     \n\t"
@@ -1175,26 +1175,26 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
                "pshufw $0xFF, %%mm0, %%mm0     \n\t"
                "2:                             \n\t"
                "psubw %%mm1, %%mm0             \n\t"
-               "movl 8(%%ebx, %%eax), %%esi    \n\t"
+               "mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
                "pmullw %%mm3, %%mm0            \n\t"
                "psllw $7, %%mm1                \n\t"
                "paddw %%mm1, %%mm0             \n\t"
 
-               "movq %%mm0, (%%edi, %%eax)     \n\t"
+               "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
 
-               "addl $8, %%eax                 \n\t"
+               "add $8, %%"REG_a"              \n\t"
        // End
                "9:                             \n\t"
 //             "int $3\n\t"
-               "leal 0b, %0                    \n\t"
-               "leal 1b, %1                    \n\t"
-               "leal 2b, %2                    \n\t"
-               "decl %1                        \n\t"
-               "decl %2                        \n\t"
-               "subl %0, %1                    \n\t"
-               "subl %0, %2                    \n\t"
-               "leal 9b, %3                    \n\t"
-               "subl %0, %3                    \n\t"
+               "lea 0b, %0                     \n\t"
+               "lea 1b, %1                     \n\t"
+               "lea 2b, %2                     \n\t"
+               "dec %1                         \n\t"
+               "dec %2                         \n\t"
+               "sub %0, %1                     \n\t"
+               "sub %0, %2                     \n\t"
+               "lea 9b, %3                     \n\t"
+               "sub %0, %3                     \n\t"
 
 
                :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
@@ -1205,34 +1205,34 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
                "jmp 9f                         \n\t"
        // Begin
                "0:                             \n\t"
-               "movq (%%edx, %%eax), %%mm3     \n\t" 
-               "movd (%%ecx, %%esi), %%mm0     \n\t" 
+               "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t" 
+               "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t" 
                "punpcklbw %%mm7, %%mm0         \n\t"
                "pshufw $0xFF, %%mm0, %%mm1     \n\t"
                "1:                             \n\t"
                "pshufw $0xFF, %%mm0, %%mm0     \n\t"
                "2:                             \n\t"
                "psubw %%mm1, %%mm0             \n\t"
-               "movl 8(%%ebx, %%eax), %%esi    \n\t"
+               "mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
                "pmullw %%mm3, %%mm0            \n\t"
                "psllw $7, %%mm1                \n\t"
                "paddw %%mm1, %%mm0             \n\t"
 
-               "movq %%mm0, (%%edi, %%eax)     \n\t"
+               "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
 
-               "addl $8, %%eax                 \n\t"
+               "add $8, %%"REG_a"              \n\t"
        // End
                "9:                             \n\t"
 //             "int $3\n\t"
-               "leal 0b, %0                    \n\t"
-               "leal 1b, %1                    \n\t"
-               "leal 2b, %2                    \n\t"
-               "decl %1                        \n\t"
-               "decl %2                        \n\t"
-               "subl %0, %1                    \n\t"
-               "subl %0, %2                    \n\t"
-               "leal 9b, %3                    \n\t"
-               "subl %0, %3                    \n\t"
+               "lea 0b, %0                     \n\t"
+               "lea 1b, %1                     \n\t"
+               "lea 2b, %2                     \n\t"
+               "dec %1                         \n\t"
+               "dec %2                         \n\t"
+               "sub %0, %1                     \n\t"
+               "sub %0, %2                     \n\t"
+               "lea 9b, %3                     \n\t"
+               "sub %0, %3                     \n\t"
 
 
                :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
@@ -1313,7 +1313,7 @@ static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *fil
        }
        filterPos[i/2]= xpos>>16; // needed to jump to the next part
 }
-#endif // ARCH_X86
+#endif // ARCH_X86 || ARCH_X86_64
 
 static void globalInit(){
     // generating tables:
@@ -1327,7 +1327,7 @@ static void globalInit(){
 static SwsFunc getSwsFunc(int flags){
     
 #ifdef RUNTIME_CPUDETECT
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        // ordered per speed fasterst first
        if(flags & SWS_CPU_CAPS_MMX2)
                return swScale_MMX2;
@@ -1755,7 +1755,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
        int unscaled, needsDither;
        int srcFormat, dstFormat;
        SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
        if(flags & SWS_CPU_CAPS_MMX)
                asm volatile("emms\n\t"::: "memory");
 #endif
@@ -1995,7 +1995,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
                                 (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
                                 srcFilter->chrH, dstFilter->chrH, c->param);
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 // can't downscale !!!
                if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
                {
@@ -2136,7 +2136,7 @@ SwsContext *sws_getContext(int srcW, int srcH, int origSrcFormat, int dstW, int
                }
                else
                {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
                        MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
 #else
                        if(flags & SWS_FAST_BILINEAR)
diff --git a/postproc/swscale_template.c b/postproc/swscale_template.c
index 000f2e2..ab6c835 100644 (file)
--- a/postproc/swscale_template.c
+++ b/postproc/swscale_template.c
@@ -16,6 +16,7 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
+#undef REAL_MOVNTQ
 #undef MOVNTQ
 #undef PAVGB
 #undef PREFETCH
 #endif
 
 #ifdef HAVE_MMX2
-#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
 #else
-#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
 #endif
+#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)
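The extra REAL_MOVNTQ level matters for macro arguments: the # operator stringifies its parameter without expanding it, so a single-level define would emit the literal text %%REGa into the asm string. Routing the call through MOVNTQ first lets the preprocessor expand REGa (assumed here to be a bare-token sibling of the REG_a string macro) before REAL_MOVNTQ pastes the pieces together:

/* REGa as a bare token is an assumption for this sketch; the string macro
 * REG_a cannot be used inside a stringified macro argument. */
#define REGa rax
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)      /* arguments get expanded at this level */

/* MOVNTQ(%%mm3, (%1, %%REGa))      -> "movntq %%mm3, (%1, %%rax) \n\t"
   REAL_MOVNTQ(%%mm3, (%1, %%REGa)) -> "movntq %%mm3, (%1, %%REGa) \n\t"  (literal, wrong) */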
 
 #ifdef HAVE_ALTIVEC
 #include "swscale_altivec_template.c"
 #endif
 
 #define YSCALEYUV2YV12X(x, offset) \
-                       "xorl %%eax, %%eax              \n\t"\
+                       "xor %%"REG_a", %%"REG_a"       \n\t"\
                        "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                        "movq %%mm3, %%mm4              \n\t"\
-                       "leal " offset "(%0), %%edx     \n\t"\
-                       "movl (%%edx), %%esi            \n\t"\
+                       "lea " offset "(%0), %%"REG_d"  \n\t"\
+                       "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        ".balign 16                     \n\t" /* FIXME Unroll? */\
                        "1:                             \n\t"\
-                       "movq 8(%%edx), %%mm0           \n\t" /* filterCoeff */\
-                       "movq " #x "(%%esi, %%eax, 2), %%mm2    \n\t" /* srcData */\
-                       "movq 8+" #x "(%%esi, %%eax, 2), %%mm5  \n\t" /* srcData */\
-                       "addl $16, %%edx                \n\t"\
-                       "movl (%%edx), %%esi            \n\t"\
-                       "testl %%esi, %%esi             \n\t"\
+                       "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
+                       "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+                       "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+                       "add $16, %%"REG_d"             \n\t"\
+                       "mov (%%"REG_d"), %%"REG_S"     \n\t"\
+                       "test %%"REG_S", %%"REG_S"      \n\t"\
                        "pmulhw %%mm0, %%mm2            \n\t"\
                        "pmulhw %%mm0, %%mm5            \n\t"\
                        "paddw %%mm2, %%mm3             \n\t"\
                        "psraw $3, %%mm3                \n\t"\
                        "psraw $3, %%mm4                \n\t"\
                        "packuswb %%mm4, %%mm3          \n\t"\
-                       MOVNTQ(%%mm3, (%1, %%eax))\
-                       "addl $8, %%eax                 \n\t"\
-                       "cmpl %2, %%eax                 \n\t"\
+                       MOVNTQ(%%mm3, (%1, %%REGa))\
+                       "add $8, %%"REG_a"              \n\t"\
+                       "cmp %2, %%"REG_a"              \n\t"\
                        "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                        "movq %%mm3, %%mm4              \n\t"\
-                       "leal " offset "(%0), %%edx     \n\t"\
-                       "movl (%%edx), %%esi            \n\t"\
+                       "lea " offset "(%0), %%"REG_d"  \n\t"\
+                       "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                        "jb 1b                          \n\t"
 
 #define YSCALEYUV2YV121 \
-                       "movl %2, %%eax                 \n\t"\
+                       "mov %2, %%"REG_a"              \n\t"\
                        ".balign 16                     \n\t" /* FIXME Unroll? */\
                        "1:                             \n\t"\
-                       "movq (%0, %%eax, 2), %%mm0     \n\t"\
-                       "movq 8(%0, %%eax, 2), %%mm1    \n\t"\
+                       "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+                       "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
                        "psraw $7, %%mm0                \n\t"\
                        "psraw $7, %%mm1                \n\t"\
                        "packuswb %%mm1, %%mm0          \n\t"\
-                       MOVNTQ(%%mm0, (%1, %%eax))\
-                       "addl $8, %%eax                 \n\t"\
+                       MOVNTQ(%%mm0, (%1, %%REGa))\
+                       "add $8, %%"REG_a"              \n\t"\
                        "jnc 1b                         \n\t"
 
 /*
                        : "%eax", "%ebx", "%ecx", "%edx", "%esi"
 */
 #define YSCALEYUV2PACKEDX \
-               "xorl %%eax, %%eax              \n\t"\
+               "xor %%"REG_a", %%"REG_a"       \n\t"\
                ".balign 16                     \n\t"\
                "nop                            \n\t"\
                "1:                             \n\t"\
-               "leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx        \n\t"\
-               "movl (%%edx), %%esi            \n\t"\
+               "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+               "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
                "movq %%mm3, %%mm4              \n\t"\
                ".balign 16                     \n\t"\
                "2:                             \n\t"\
-               "movq 8(%%edx), %%mm0           \n\t" /* filterCoeff */\
-               "movq (%%esi, %%eax), %%mm2     \n\t" /* UsrcData */\
-               "movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
-               "addl $16, %%edx                \n\t"\
-               "movl (%%edx), %%esi            \n\t"\
+               "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
+               "movq (%%"REG_S", %%"REG_a"), %%mm2     \n\t" /* UsrcData */\
+               "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+               "add $16, %%"REG_d"             \n\t"\
+               "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t"\
                "pmulhw %%mm0, %%mm5            \n\t"\
                "paddw %%mm2, %%mm3             \n\t"\
                "paddw %%mm5, %%mm4             \n\t"\
-               "testl %%esi, %%esi             \n\t"\
+               "test %%"REG_S", %%"REG_S"      \n\t"\
                " jnz 2b                        \n\t"\
 \
-               "leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx        \n\t"\
-               "movl (%%edx), %%esi            \n\t"\
+               "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+               "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
                "movq %%mm1, %%mm7              \n\t"\
                ".balign 16                     \n\t"\
                "2:                             \n\t"\
-               "movq 8(%%edx), %%mm0           \n\t" /* filterCoeff */\
-               "movq (%%esi, %%eax, 2), %%mm2  \n\t" /* Y1srcData */\
-               "movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
-               "addl $16, %%edx                \n\t"\
-               "movl (%%edx), %%esi            \n\t"\
+               "movq 8(%%"REG_d"), %%mm0       \n\t" /* filterCoeff */\
+               "movq (%%"REG_S", %%"REG_a", 2), %%mm2  \n\t" /* Y1srcData */\
+               "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+               "add $16, %%"REG_d"             \n\t"\
+               "mov (%%"REG_d"), %%"REG_S"     \n\t"\
                "pmulhw %%mm0, %%mm2            \n\t"\
                "pmulhw %%mm0, %%mm5            \n\t"\
                "paddw %%mm2, %%mm1             \n\t"\
                "paddw %%mm5, %%mm7             \n\t"\
-               "testl %%esi, %%esi             \n\t"\
+               "test %%"REG_S", %%"REG_S"      \n\t"\
                " jnz 2b                        \n\t"\
 
 
                "movd %7, %%mm5                 \n\t" /*uvalpha1*/\
                "punpcklwd %%mm5, %%mm5         \n\t"\
                "punpcklwd %%mm5, %%mm5         \n\t"\
-               "xorl %%eax, %%eax              \n\t"\
+               "xor %%"REG_a", %%"REG_a"               \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
-               "movq (%0, %%eax, 2), %%mm0     \n\t" /*buf0[eax]*/\
-               "movq (%1, %%eax, 2), %%mm1     \n\t" /*buf1[eax]*/\
-               "movq (%2, %%eax,2), %%mm2      \n\t" /* uvbuf0[eax]*/\
-               "movq (%3, %%eax,2), %%mm3      \n\t" /* uvbuf1[eax]*/\
+               "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
+               "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
+               "movq (%2, %%"REG_a",2), %%mm2  \n\t" /* uvbuf0[eax]*/\
+               "movq (%3, %%"REG_a",2), %%mm3  \n\t" /* uvbuf1[eax]*/\
                "psubw %%mm1, %%mm0             \n\t" /* buf0[eax] - buf1[eax]*/\
                "psubw %%mm3, %%mm2             \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
                "pmulhw %%mm6, %%mm0            \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
                "pmulhw %%mm5, %%mm2            \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
                "psraw $4, %%mm1                \n\t" /* buf0[eax] - buf1[eax] >>4*/\
-               "movq 4096(%2, %%eax,2), %%mm4  \n\t" /* uvbuf0[eax+2048]*/\
+               "movq 4096(%2, %%"REG_a",2), %%mm4      \n\t" /* uvbuf0[eax+2048]*/\
                "psraw $4, %%mm3                \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
-               "movq 4096(%3, %%eax,2), %%mm0  \n\t" /* uvbuf1[eax+2048]*/\
+               "movq 4096(%3, %%"REG_a",2), %%mm0      \n\t" /* uvbuf1[eax+2048]*/\
                "paddw %%mm2, %%mm3             \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
                "psubw %%mm0, %%mm4             \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
                "psubw "MANGLE(w80)", %%mm1     \n\t" /* 8(Y-16)*/\
                "packuswb %%mm1, %%mm1          \n\t"
 #endif
 
-#define YSCALEYUV2PACKED(index, c) \
+#define REAL_YSCALEYUV2PACKED(index, c) \
                "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
                "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
                "psraw $3, %%mm0                \n\t"\
                "psraw $3, %%mm1                \n\t"\
                "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
                "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
-               "xorl "#index", "#index"                \n\t"\
+               "xor "#index", "#index"         \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "paddw %%mm0, %%mm1             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                "paddw %%mm6, %%mm7             \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
                 
-#define YSCALEYUV2RGB(index, c) \
-               "xorl "#index", "#index"        \n\t"\
+#define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
+                
+#define REAL_YSCALEYUV2RGB(index, c) \
+               "xor "#index", "#index" \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
+#define YSCALEYUV2RGB(index, c)  REAL_YSCALEYUV2RGB(index, c)
                 
-#define YSCALEYUV2PACKED1(index, c) \
-               "xorl "#index", "#index"                \n\t"\
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+               "xor "#index", "#index"         \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
                "psraw $7, %%mm1                \n\t" \
                "psraw $7, %%mm7                \n\t" \
                 
-#define YSCALEYUV2RGB1(index, c) \
-               "xorl "#index", "#index"        \n\t"\
+#define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
+                
+#define REAL_YSCALEYUV2RGB1(index, c) \
+               "xor "#index", "#index" \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
+#define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
 
-#define YSCALEYUV2PACKED1b(index, c) \
-               "xorl "#index", "#index"                \n\t"\
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+               "xor "#index", "#index"         \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
                "psraw $7, %%mm1                \n\t" \
                "psraw $7, %%mm7                \n\t" 
+#define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
                 
 // do vertical chrominance interpolation
-#define YSCALEYUV2RGB1b(index, c) \
-               "xorl "#index", "#index"                \n\t"\
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+               "xor "#index", "#index"         \n\t"\
                ".balign 16                     \n\t"\
                "1:                             \n\t"\
                "movq (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
                "packuswb %%mm6, %%mm5          \n\t"\
                "packuswb %%mm3, %%mm4          \n\t"\
                "pxor %%mm7, %%mm7              \n\t"
+#define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
 
-#define WRITEBGR32(dst, dstw, index) \
+#define REAL_WRITEBGR32(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        "movq %%mm2, %%mm1              \n\t" /* B */\
                        "movq %%mm5, %%mm6              \n\t" /* R */\
                        MOVNTQ(%%mm1, 16(dst, index, 4))\
                        MOVNTQ(%%mm3, 24(dst, index, 4))\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
+#define WRITEBGR32(dst, dstw, index)  REAL_WRITEBGR32(dst, dstw, index)
 
-#define WRITEBGR16(dst, dstw, index) \
+#define REAL_WRITEBGR16(dst, dstw, index) \
                        "pand "MANGLE(bF8)", %%mm2      \n\t" /* B */\
                        "pand "MANGLE(bFC)", %%mm4      \n\t" /* G */\
                        "pand "MANGLE(bF8)", %%mm5      \n\t" /* R */\
                        MOVNTQ(%%mm2, (dst, index, 2))\
                        MOVNTQ(%%mm1, 8(dst, index, 2))\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
+#define WRITEBGR16(dst, dstw, index)  REAL_WRITEBGR16(dst, dstw, index)
 
-#define WRITEBGR15(dst, dstw, index) \
+#define REAL_WRITEBGR15(dst, dstw, index) \
                        "pand "MANGLE(bF8)", %%mm2      \n\t" /* B */\
                        "pand "MANGLE(bF8)", %%mm4      \n\t" /* G */\
                        "pand "MANGLE(bF8)", %%mm5      \n\t" /* R */\
                        MOVNTQ(%%mm2, (dst, index, 2))\
                        MOVNTQ(%%mm1, 8(dst, index, 2))\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
+#define WRITEBGR15(dst, dstw, index)  REAL_WRITEBGR15(dst, dstw, index)
 
 #define WRITEBGR24OLD(dst, dstw, index) \
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
                        MOVNTQ(%%mm0, (dst))\
                        MOVNTQ(%%mm2, 8(dst))\
                        MOVNTQ(%%mm3, 16(dst))\
-                       "addl $24, "#dst"               \n\t"\
+                       "add $24, "#dst"                \n\t"\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
 
 #define WRITEBGR24MMX(dst, dstw, index) \
                        "por %%mm3, %%mm5               \n\t" /* RGBRGBRG 2 */\
                        MOVNTQ(%%mm5, 16(dst))\
 \
-                       "addl $24, "#dst"               \n\t"\
+                       "add $24, "#dst"                \n\t"\
 \
-                       "addl $8, "#index"                      \n\t"\
-                       "cmpl "#dstw", "#index"                 \n\t"\
+                       "add $8, "#index"                       \n\t"\
+                       "cmp "#dstw", "#index"                  \n\t"\
                        " jb 1b                         \n\t"
 
 #define WRITEBGR24MMX2(dst, dstw, index) \
                        "por %%mm3, %%mm6               \n\t"\
                        MOVNTQ(%%mm6, 16(dst))\
 \
-                       "addl $24, "#dst"               \n\t"\
+                       "add $24, "#dst"                \n\t"\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
 
 #ifdef HAVE_MMX2
 #undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX2
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
 #else
 #undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
 #endif
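Turning WRITEBGR24 from an object-like alias into a function-like one serves the same purpose as the REAL_* pairs: with the extra parameterized layer, call-site arguments such as %%REGb and %%REGa are macro-expanded once before WRITEBGR24MMX/WRITEBGR24MMX2 stringify them via "#dst" and "#index", so the emitted asm contains real register names rather than the literal tokens REGb and REGa.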
 
-#define WRITEYUY2(dst, dstw, index) \
+#define REAL_WRITEYUY2(dst, dstw, index) \
                        "packuswb %%mm3, %%mm3          \n\t"\
                        "packuswb %%mm4, %%mm4          \n\t"\
                        "packuswb %%mm7, %%mm1          \n\t"\
                        MOVNTQ(%%mm1, (dst, index, 2))\
                        MOVNTQ(%%mm7, 8(dst, index, 2))\
 \
-                       "addl $8, "#index"              \n\t"\
-                       "cmpl "#dstw", "#index"         \n\t"\
+                       "add $8, "#index"               \n\t"\
+                       "cmp "#dstw", "#index"          \n\t"\
                        " jb 1b                         \n\t"
+#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
 
 
 static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
@@ -751,23 +765,23 @@ static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t *
                asm volatile(
                                YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
                                :: "r" (&c->redDither),
-                               "r" (uDest), "m" (chrDstW)
-                               : "%eax", "%edx", "%esi"
+                               "r" (uDest), "m" ((long)chrDstW)
+                               : "%"REG_a, "%"REG_d, "%"REG_S
                        );
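The (long) casts on the "m" and "g" operands go hand in hand with the suffix-less mnemonics: cmp %2, %%"REG_a" and mov %2, %%"REG_a" now take their operand size from the register, so on x86_64 the memory operand must be 8 bytes wide. Casting the int width variables (chrDstW, dstW, and the negated widths passed via "g" below) to long keeps the C-side operand the same size as the 64-bit register on x86_64 while remaining 32-bit on ia32.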
 
                asm volatile(
                                YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
                                :: "r" (&c->redDither),
-                               "r" (vDest), "m" (chrDstW)
-                               : "%eax", "%edx", "%esi"
+                               "r" (vDest), "m" ((long)chrDstW)
+                               : "%"REG_a, "%"REG_d, "%"REG_S
                        );
        }
 
        asm volatile(
                        YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
                        :: "r" (&c->redDither),
-                          "r" (dest), "m" (dstW)
-                       : "%eax", "%edx", "%esi"
+                          "r" (dest), "m" ((long)dstW)
+                       : "%"REG_a, "%"REG_d, "%"REG_S
                );
 #else
 #ifdef HAVE_ALTIVEC
@@ -791,23 +805,23 @@ static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
                asm volatile(
                                YSCALEYUV2YV121
                                :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
-                               "g" (-chrDstW)
-                               : "%eax"
+                               "g" ((long)-chrDstW)
+                               : "%"REG_a
                        );
 
                asm volatile(
                                YSCALEYUV2YV121
                                :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
-                               "g" (-chrDstW)
-                               : "%eax"
+                               "g" ((long)-chrDstW)
+                               : "%"REG_a
                        );
        }
 
        asm volatile(
                YSCALEYUV2YV121
                :: "r" (lumSrc + dstW), "r" (dest + dstW),
-               "g" (-dstW)
-               : "%eax"
+               "g" ((long)-dstW)
+               : "%"REG_a
        );
 #else
        int i;
@@ -858,12 +872,12 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
                {
                        asm volatile(
                                YSCALEYUV2RGBX
-                               WRITEBGR32(%4, %5, %%eax)
+                               WRITEBGR32(%4, %5, %%REGa)
 
                        :: "r" (&c->redDither), 
                           "m" (dummy), "m" (dummy), "m" (dummy),
                           "r" (dest), "m" (dstW)
-                       : "%eax", "%edx", "%esi"
+                       : "%"REG_a, "%"REG_d, "%"REG_S
                        );
                }
                break;
@@ -871,14 +885,14 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
                {
                        asm volatile(
                                YSCALEYUV2RGBX
-                               "leal (%%eax, %%eax, 2), %%ebx  \n\t" //FIXME optimize
-                               "addl %4, %%ebx                 \n\t"
-                               WRITEBGR24(%%ebx, %5, %%eax)
+                               "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+                               "add %4, %%"REG_b"                      \n\t"
+                               WRITEBGR24(%%REGb, %5, %%REGa)
 
                        :: "r" (&c->redDither), 
                           "m" (dummy), "m" (dummy), "m" (dummy),
                           "r" (dest), "m" (dstW)
-                       : "%eax", "%ebx", "%edx", "%esi" //FIXME ebx
+                       : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
                        );
                }
                break;
@@ -893,12 +907,12 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR15(%4, %5, %%eax)
+                               WRITEBGR15(%4, %5, %%REGa)
 
                        :: "r" (&c->redDither), 
                           "m" (dummy), "m" (dummy), "m" (dummy),
                           "r" (dest), "m" (dstW)
-                       : "%eax", "%edx", "%esi"
+                       : "%"REG_a, "%"REG_d, "%"REG_S
                        );
                }
                break;
@@ -913,12 +927,12 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR16(%4, %5, %%eax)
+                               WRITEBGR16(%4, %5, %%REGa)
 
                        :: "r" (&c->redDither), 
                           "m" (dummy), "m" (dummy), "m" (dummy),
                           "r" (dest), "m" (dstW)
-                       : "%eax", "%edx", "%esi"
+                       : "%"REG_a, "%"REG_d, "%"REG_S
                        );
                }
                break;
@@ -932,12 +946,12 @@ static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_
                                "psraw $3, %%mm4                \n\t"
                                "psraw $3, %%mm1                \n\t"
                                "psraw $3, %%mm7                \n\t"
-                               WRITEYUY2(%4, %5, %%eax)
+                               WRITEYUY2(%4, %5, %%REGa)
 
                        :: "r" (&c->redDither), 
                           "m" (dummy), "m" (dummy), "m" (dummy),
                           "r" (dest), "m" (dstW)
-                       : "%eax", "%edx", "%esi"
+                       : "%"REG_a, "%"REG_d, "%"REG_S
                        );
                }
                break;
@@ -984,17 +998,17 @@ FULL_YSCALEYUV2RGB
                        "punpcklwd %%mm0, %%mm3         \n\t" // BGR0BGR0
                        "punpckhwd %%mm0, %%mm1         \n\t" // BGR0BGR0
 
-                       MOVNTQ(%%mm3, (%4, %%eax, 4))
-                       MOVNTQ(%%mm1, 8(%4, %%eax, 4))
+                       MOVNTQ(%%mm3, (%4, %%REGa, 4))
+                       MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
 
-                       "addl $4, %%eax                 \n\t"
-                       "cmpl %5, %%eax                 \n\t"
+                       "add $4, %%"REG_a"              \n\t"
+                       "cmp %5, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
 
 
-                       :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+                       :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
                        "m" (yalpha1), "m" (uvalpha1)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        break;
                case IMGFMT_BGR24:
@@ -1024,26 +1038,26 @@ FULL_YSCALEYUV2RGB
                        "psrlq $24, %%mm1               \n\t" // 0BGR0000
                        "por %%mm2, %%mm1               \n\t" // RBGRR000
 
-                       "movl %4, %%ebx                 \n\t"
-                       "addl %%eax, %%ebx              \n\t"
+                       "mov %4, %%"REG_b"              \n\t"
+                       "add %%"REG_a", %%"REG_b"       \n\t"
 
 #ifdef HAVE_MMX2
                        //FIXME Alignment
-                       "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
-                       "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
+                       "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
+                       "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
 #else
-                       "movd %%mm3, (%%ebx, %%eax, 2)  \n\t"
+                       "movd %%mm3, (%%"REG_b", %%"REG_a", 2)  \n\t"
                        "psrlq $32, %%mm3               \n\t"
-                       "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
-                       "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
+                       "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
+                       "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
 #endif
-                       "addl $4, %%eax                 \n\t"
-                       "cmpl %5, %%eax                 \n\t"
+                       "add $4, %%"REG_a"              \n\t"
+                       "cmp %5, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                        "m" (yalpha1), "m" (uvalpha1)
-                       : "%eax", "%ebx"
+                       : "%"REG_a, "%"REG_b
                        );
                        break;
                case IMGFMT_BGR15:
@@ -1068,15 +1082,15 @@ FULL_YSCALEYUV2RGB
                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"
 
-                       MOVNTQ(%%mm0, (%4, %%eax, 2))
+                       MOVNTQ(%%mm0, (%4, %%REGa, 2))
 
-                       "addl $4, %%eax                 \n\t"
-                       "cmpl %5, %%eax                 \n\t"
+                       "add $4, %%"REG_a"              \n\t"
+                       "cmp %5, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                        "m" (yalpha1), "m" (uvalpha1)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        break;
                case IMGFMT_BGR16:
@@ -1101,15 +1115,15 @@ FULL_YSCALEYUV2RGB
                        "por %%mm3, %%mm1               \n\t"
                        "por %%mm1, %%mm0               \n\t"
 
-                       MOVNTQ(%%mm0, (%4, %%eax, 2))
+                       MOVNTQ(%%mm0, (%4, %%REGa, 2))
 
-                       "addl $4, %%eax                 \n\t"
-                       "cmpl %5, %%eax                 \n\t"
+                       "add $4, %%"REG_a"              \n\t"
+                       "cmp %5, %%"REG_a"              \n\t"
                        " jb 1b                         \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                        "m" (yalpha1), "m" (uvalpha1)
-                       : "%eax"
+                       : "%"REG_a
                        );
                break;
 #endif
@@ -1188,34 +1202,34 @@ FULL_YSCALEYUV2RGB
 //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case IMGFMT_BGR32:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB(%%eax, %5)
-                               WRITEBGR32(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB(%%REGa, %5)
+                               WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
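These per-format blocks temporarily repurpose the stack pointer: general-purpose registers are scarce here (four buffer pointers, the index in REG_a, the context pointer in %5), so the real stack pointer is first saved into the context at ESP_OFFSET, REG_SP is loaded with the destination pointer (%4) and used as the write pointer, and the saved value is restored at the end. The port only parameterizes the register name, so the same trick uses esp on ia32 and rsp on x86_64; 8280(%5) is the destination width stored in the context (DSTW_OFFSET, per the note above).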
        case IMGFMT_BGR24:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                 \n\t"
-                               YSCALEYUV2RGB(%%eax, %5)
-                               WRITEBGR24(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB(%%REGa, %5)
+                               WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
        case IMGFMT_BGR15:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1223,19 +1237,19 @@ FULL_YSCALEYUV2RGB
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR15(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
        case IMGFMT_BGR16:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1243,23 +1257,23 @@ FULL_YSCALEYUV2RGB
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR16(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
        case IMGFMT_YUY2:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2PACKED(%%eax, %5)
-                               WRITEYUY2(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2PACKED(%%REGa, %5)
+                               WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
        default: break;
@@ -1293,54 +1307,54 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
                {
                case IMGFMT_BGR32:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1(%%eax, %5)
-                               WRITEBGR32(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1(%%REGa, %5)
+                               WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR24:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1(%%eax, %5)
-                               WRITEBGR24(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1(%%REGa, %5)
+                               WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR15:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
-                               WRITEBGR15(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR16:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1348,25 +1362,25 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR16(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_YUY2:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2PACKED1(%%eax, %5)
-                               WRITEYUY2(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2PACKED1(%%REGa, %5)
+                               WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                }
@@ -1377,54 +1391,54 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
                {
                case IMGFMT_BGR32:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1b(%%eax, %5)
-                               WRITEBGR32(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1b(%%REGa, %5)
+                               WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR24:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1b(%%eax, %5)
-                               WRITEBGR24(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1b(%%REGa, %5)
+                               WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR15:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1b(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1b(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
-                               WRITEBGR15(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_BGR16:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2RGB1b(%%eax, %5)
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2RGB1b(%%REGa, %5)
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
 #ifdef DITHER1XBPP
                                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1432,25 +1446,25 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
                                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
 #endif
 
-                               WRITEBGR16(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                case IMGFMT_YUY2:
                        asm volatile(
-                               "movl %%esp, "ESP_OFFSET"(%5)           \n\t"
-                               "movl %4, %%esp                         \n\t"
-                               YSCALEYUV2PACKED1b(%%eax, %5)
-                               WRITEYUY2(%%esp, 8280(%5), %%eax)
-                               "movl "ESP_OFFSET"(%5), %%esp           \n\t"
+                               "mov %%"REG_SP", "ESP_OFFSET"(%5)       \n\t"
+                               "mov %4, %%"REG_SP"                     \n\t"
+                               YSCALEYUV2PACKED1b(%%REGa, %5)
+                               WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+                               "mov "ESP_OFFSET"(%5), %%"REG_SP"       \n\t"
 
                        :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
                        "r" (&c->redDither)
-                       : "%eax"
+                       : "%"REG_a
                        );
                        return;
                }
@@ -1471,18 +1485,18 @@ static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
 #ifdef HAVE_MMX
        asm volatile(
                "movq "MANGLE(bm01010101)", %%mm2\n\t"
-               "movl %0, %%eax                 \n\t"
+               "mov %0, %%"REG_a"              \n\t"
                "1:                             \n\t"
-               "movq (%1, %%eax,2), %%mm0      \n\t"
-               "movq 8(%1, %%eax,2), %%mm1     \n\t"
+               "movq (%1, %%"REG_a",2), %%mm0  \n\t"
+               "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
                "pand %%mm2, %%mm0              \n\t"
                "pand %%mm2, %%mm1              \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
-               "movq %%mm0, (%2, %%eax)        \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movq %%mm0, (%2, %%"REG_a")    \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "g" (-width), "r" (src+width*2), "r" (dst+width)
-               : "%eax"
+               : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+               : "%"REG_a
        );
 #else
        int i;
@@ -1496,12 +1510,12 @@ static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1,
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
        asm volatile(
                "movq "MANGLE(bm01010101)", %%mm4\n\t"
-               "movl %0, %%eax                 \n\t"
+               "mov %0, %%"REG_a"              \n\t"
                "1:                             \n\t"
-               "movq (%1, %%eax,4), %%mm0      \n\t"
-               "movq 8(%1, %%eax,4), %%mm1     \n\t"
-               "movq (%2, %%eax,4), %%mm2      \n\t"
-               "movq 8(%2, %%eax,4), %%mm3     \n\t"
+               "movq (%1, %%"REG_a",4), %%mm0  \n\t"
+               "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+               "movq (%2, %%"REG_a",4), %%mm2  \n\t"
+               "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
                PAVGB(%%mm2, %%mm0)
                PAVGB(%%mm3, %%mm1)
                "psrlw $8, %%mm0                \n\t"
@@ -1512,12 +1526,12 @@ static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1,
                "pand %%mm4, %%mm1              \n\t"
                "packuswb %%mm0, %%mm0          \n\t"
                "packuswb %%mm1, %%mm1          \n\t"
-               "movd %%mm0, (%4, %%eax)        \n\t"
-               "movd %%mm1, (%3, %%eax)        \n\t"
-               "addl $4, %%eax                 \n\t"
+               "movd %%mm0, (%4, %%"REG_a")    \n\t"
+               "movd %%mm1, (%3, %%"REG_a")    \n\t"
+               "add $4, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
-               : "%eax"
+               : : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+               : "%"REG_a
        );
 #else
        int i;
@@ -1534,18 +1548,18 @@ static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, int width)
 {
 #ifdef HAVE_MMX
        asm volatile(
-               "movl %0, %%eax                 \n\t"
+               "mov %0, %%"REG_a"              \n\t"
                "1:                             \n\t"
-               "movq (%1, %%eax,2), %%mm0      \n\t"
-               "movq 8(%1, %%eax,2), %%mm1     \n\t"
+               "movq (%1, %%"REG_a",2), %%mm0  \n\t"
+               "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
                "psrlw $8, %%mm0                \n\t"
                "psrlw $8, %%mm1                \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
-               "movq %%mm0, (%2, %%eax)        \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movq %%mm0, (%2, %%"REG_a")    \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "g" (-width), "r" (src+width*2), "r" (dst+width)
-               : "%eax"
+               : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+               : "%"REG_a
        );
 #else
        int i;
@@ -1559,12 +1573,12 @@ static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1,
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
        asm volatile(
                "movq "MANGLE(bm01010101)", %%mm4\n\t"
-               "movl %0, %%eax                 \n\t"
+               "mov %0, %%"REG_a"              \n\t"
                "1:                             \n\t"
-               "movq (%1, %%eax,4), %%mm0      \n\t"
-               "movq 8(%1, %%eax,4), %%mm1     \n\t"
-               "movq (%2, %%eax,4), %%mm2      \n\t"
-               "movq 8(%2, %%eax,4), %%mm3     \n\t"
+               "movq (%1, %%"REG_a",4), %%mm0  \n\t"
+               "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+               "movq (%2, %%"REG_a",4), %%mm2  \n\t"
+               "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
                PAVGB(%%mm2, %%mm0)
                PAVGB(%%mm3, %%mm1)
                "pand %%mm4, %%mm0              \n\t"
@@ -1575,12 +1589,12 @@ static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1,
                "pand %%mm4, %%mm1              \n\t"
                "packuswb %%mm0, %%mm0          \n\t"
                "packuswb %%mm1, %%mm1          \n\t"
-               "movd %%mm0, (%4, %%eax)        \n\t"
-               "movd %%mm1, (%3, %%eax)        \n\t"
-               "addl $4, %%eax                 \n\t"
+               "movd %%mm0, (%4, %%"REG_a")    \n\t"
+               "movd %%mm1, (%3, %%"REG_a")    \n\t"
+               "add $4, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
-               : "%eax"
+               : : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+               : "%"REG_a
        );
 #else
        int i;
@@ -1635,20 +1649,20 @@ static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
 {
 #ifdef HAVE_MMX
        asm volatile(
-               "movl %2, %%eax                 \n\t"
+               "mov %2, %%"REG_a"              \n\t"
                "movq "MANGLE(bgr2YCoeff)", %%mm6               \n\t"
                "movq "MANGLE(w1111)", %%mm5            \n\t"
                "pxor %%mm7, %%mm7              \n\t"
-               "leal (%%eax, %%eax, 2), %%ebx  \n\t"
+               "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               PREFETCH" 64(%0, %%ebx)         \n\t"
-               "movd (%0, %%ebx), %%mm0        \n\t"
-               "movd 3(%0, %%ebx), %%mm1       \n\t"
+               PREFETCH" 64(%0, %%"REG_b")     \n\t"
+               "movd (%0, %%"REG_b"), %%mm0    \n\t"
+               "movd 3(%0, %%"REG_b"), %%mm1   \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
-               "movd 6(%0, %%ebx), %%mm2       \n\t"
-               "movd 9(%0, %%ebx), %%mm3       \n\t"
+               "movd 6(%0, %%"REG_b"), %%mm2   \n\t"
+               "movd 9(%0, %%"REG_b"), %%mm3   \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
                "punpcklbw %%mm7, %%mm3         \n\t"
                "pmaddwd %%mm6, %%mm0           \n\t"
@@ -1668,12 +1682,12 @@ static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
                "packssdw %%mm2, %%mm0          \n\t"
                "psraw $7, %%mm0                \n\t"
 
-               "movd 12(%0, %%ebx), %%mm4      \n\t"
-               "movd 15(%0, %%ebx), %%mm1      \n\t"
+               "movd 12(%0, %%"REG_b"), %%mm4  \n\t"
+               "movd 15(%0, %%"REG_b"), %%mm1  \n\t"
                "punpcklbw %%mm7, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
-               "movd 18(%0, %%ebx), %%mm2      \n\t"
-               "movd 21(%0, %%ebx), %%mm3      \n\t"
+               "movd 18(%0, %%"REG_b"), %%mm2  \n\t"
+               "movd 21(%0, %%"REG_b"), %%mm3  \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
                "punpcklbw %%mm7, %%mm3         \n\t"
                "pmaddwd %%mm6, %%mm4           \n\t"
@@ -1690,18 +1704,18 @@ static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
                "packssdw %%mm3, %%mm2          \n\t"
                "pmaddwd %%mm5, %%mm4           \n\t"
                "pmaddwd %%mm5, %%mm2           \n\t"
-               "addl $24, %%ebx                \n\t"
+               "add $24, %%"REG_b"             \n\t"
                "packssdw %%mm2, %%mm4          \n\t"
                "psraw $7, %%mm4                \n\t"
 
                "packuswb %%mm4, %%mm0          \n\t"
                "paddusb "MANGLE(bgr2YOffset)", %%mm0   \n\t"
 
-               "movq %%mm0, (%1, %%eax)        \n\t"
-               "addl $8, %%eax                 \n\t"
+               "movq %%mm0, (%1, %%"REG_a")    \n\t"
+               "add $8, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "r" (src+width*3), "r" (dst+width), "g" (-width)
-               : "%eax", "%ebx"
+               : : "r" (src+width*3), "r" (dst+width), "g" ((long)-width)
+               : "%"REG_a, "%"REG_b
        );
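In bgr24ToY the byte index is built with lea (%%"REG_a", %%"REG_a", 2), %%"REG_b", i.e. REG_b = 3 * the (negative) pixel counter, because 24-bit BGR packs 3 bytes per pixel; the port only swaps in the size-agnostic register names, the addressing logic itself is unchanged.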
 #else
        int i;
@@ -1720,21 +1734,21 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
 {
 #ifdef HAVE_MMX
        asm volatile(
-               "movl %4, %%eax                 \n\t"
+               "mov %4, %%"REG_a"              \n\t"
                "movq "MANGLE(w1111)", %%mm5            \n\t"
                "movq "MANGLE(bgr2UCoeff)", %%mm6               \n\t"
                "pxor %%mm7, %%mm7              \n\t"
-               "leal (%%eax, %%eax, 2), %%ebx  \n\t"
-               "addl %%ebx, %%ebx              \n\t"
+               "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"       \n\t"
+               "add %%"REG_b", %%"REG_b"       \n\t"
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               PREFETCH" 64(%0, %%ebx)         \n\t"
-               PREFETCH" 64(%1, %%ebx)         \n\t"
+               PREFETCH" 64(%0, %%"REG_b")     \n\t"
+               PREFETCH" 64(%1, %%"REG_b")     \n\t"
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-               "movq (%0, %%ebx), %%mm0        \n\t"
-               "movq (%1, %%ebx), %%mm1        \n\t"
-               "movq 6(%0, %%ebx), %%mm2       \n\t"
-               "movq 6(%1, %%ebx), %%mm3       \n\t"
+               "movq (%0, %%"REG_b"), %%mm0    \n\t"
+               "movq (%1, %%"REG_b"), %%mm1    \n\t"
+               "movq 6(%0, %%"REG_b"), %%mm2   \n\t"
+               "movq 6(%1, %%"REG_b"), %%mm3   \n\t"
                PAVGB(%%mm1, %%mm0)
                PAVGB(%%mm3, %%mm2)
                "movq %%mm0, %%mm1              \n\t"
@@ -1746,10 +1760,10 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
 #else
-               "movd (%0, %%ebx), %%mm0        \n\t"
-               "movd (%1, %%ebx), %%mm1        \n\t"
-               "movd 3(%0, %%ebx), %%mm2       \n\t"
-               "movd 3(%1, %%ebx), %%mm3       \n\t"
+               "movd (%0, %%"REG_b"), %%mm0    \n\t"
+               "movd (%1, %%"REG_b"), %%mm1    \n\t"
+               "movd 3(%0, %%"REG_b"), %%mm2   \n\t"
+               "movd 3(%1, %%"REG_b"), %%mm3   \n\t"
                "punpcklbw %%mm7, %%mm0         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
@@ -1757,10 +1771,10 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "paddw %%mm1, %%mm0             \n\t"
                "paddw %%mm3, %%mm2             \n\t"
                "paddw %%mm2, %%mm0             \n\t"
-               "movd 6(%0, %%ebx), %%mm4       \n\t"
-               "movd 6(%1, %%ebx), %%mm1       \n\t"
-               "movd 9(%0, %%ebx), %%mm2       \n\t"
-               "movd 9(%1, %%ebx), %%mm3       \n\t"
+               "movd 6(%0, %%"REG_b"), %%mm4   \n\t"
+               "movd 6(%1, %%"REG_b"), %%mm1   \n\t"
+               "movd 9(%0, %%"REG_b"), %%mm2   \n\t"
+               "movd 9(%1, %%"REG_b"), %%mm3   \n\t"
                "punpcklbw %%mm7, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
@@ -1792,10 +1806,10 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "psraw $7, %%mm0                \n\t"
 
 #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
-               "movq 12(%0, %%ebx), %%mm4      \n\t"
-               "movq 12(%1, %%ebx), %%mm1      \n\t"
-               "movq 18(%0, %%ebx), %%mm2      \n\t"
-               "movq 18(%1, %%ebx), %%mm3      \n\t"
+               "movq 12(%0, %%"REG_b"), %%mm4  \n\t"
+               "movq 12(%1, %%"REG_b"), %%mm1  \n\t"
+               "movq 18(%0, %%"REG_b"), %%mm2  \n\t"
+               "movq 18(%1, %%"REG_b"), %%mm3  \n\t"
                PAVGB(%%mm1, %%mm4)
                PAVGB(%%mm3, %%mm2)
                "movq %%mm4, %%mm1              \n\t"
@@ -1807,10 +1821,10 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "punpcklbw %%mm7, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
 #else
-               "movd 12(%0, %%ebx), %%mm4      \n\t"
-               "movd 12(%1, %%ebx), %%mm1      \n\t"
-               "movd 15(%0, %%ebx), %%mm2      \n\t"
-               "movd 15(%1, %%ebx), %%mm3      \n\t"
+               "movd 12(%0, %%"REG_b"), %%mm4  \n\t"
+               "movd 12(%1, %%"REG_b"), %%mm1  \n\t"
+               "movd 15(%0, %%"REG_b"), %%mm2  \n\t"
+               "movd 15(%1, %%"REG_b"), %%mm3  \n\t"
                "punpcklbw %%mm7, %%mm4         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
@@ -1818,10 +1832,10 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "paddw %%mm1, %%mm4             \n\t"
                "paddw %%mm3, %%mm2             \n\t"
                "paddw %%mm2, %%mm4             \n\t"
-               "movd 18(%0, %%ebx), %%mm5      \n\t"
-               "movd 18(%1, %%ebx), %%mm1      \n\t"
-               "movd 21(%0, %%ebx), %%mm2      \n\t"
-               "movd 21(%1, %%ebx), %%mm3      \n\t"
+               "movd 18(%0, %%"REG_b"), %%mm5  \n\t"
+               "movd 18(%1, %%"REG_b"), %%mm1  \n\t"
+               "movd 21(%0, %%"REG_b"), %%mm2  \n\t"
+               "movd 21(%1, %%"REG_b"), %%mm3  \n\t"
                "punpcklbw %%mm7, %%mm5         \n\t"
                "punpcklbw %%mm7, %%mm1         \n\t"
                "punpcklbw %%mm7, %%mm2         \n\t"
@@ -1850,7 +1864,7 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "packssdw %%mm3, %%mm1          \n\t"
                "pmaddwd %%mm5, %%mm4           \n\t"
                "pmaddwd %%mm5, %%mm1           \n\t"
-               "addl $24, %%ebx                \n\t"
+               "add $24, %%"REG_b"             \n\t"
                "packssdw %%mm1, %%mm4          \n\t" // V3 V2 U3 U2
                "psraw $7, %%mm4                \n\t"
                
@@ -1860,13 +1874,13 @@ static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1
                "packsswb %%mm1, %%mm0          \n\t"
                "paddb "MANGLE(bgr2UVOffset)", %%mm0    \n\t"
 
-               "movd %%mm0, (%2, %%eax)        \n\t"
+               "movd %%mm0, (%2, %%"REG_a")    \n\t"
                "punpckhdq %%mm0, %%mm0         \n\t"
-               "movd %%mm0, (%3, %%eax)        \n\t"
-               "addl $4, %%eax                 \n\t"
+               "movd %%mm0, (%3, %%"REG_a")    \n\t"
+               "add $4, %%"REG_a"              \n\t"
                " js 1b                         \n\t"
-               : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
-               : "%eax", "%ebx"
+               : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" ((long)-width)
+               : "%"REG_a, "%"REG_b
        );
 #else
        int i;
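
Both bgr24 conversion loops bias their pointers to the end of the row and walk a negative index upward, so the only loop test is the sign flag left by the final add. On x86_64 that index register is 64-bit and is added straight to 64-bit pointers, which is why the "g"-constrained operand is now cast to (long): a 32-bit -width would not be sign-extended into the upper half of the register. A rough scalar rendering of the idiom, with a hypothetical helper and a plain average standing in for the pmaddwd weighting:

    #include <stdint.h>

    /* sketch of the negative-index loop idiom used by the asm above */
    static void bgr24_to_gray_sketch(uint8_t *dst, const uint8_t *src, long width)
    {
        const uint8_t *s = src + width * 3;   /* bias to one past the row end */
        uint8_t *d = dst + width;
        for (long i = -width; i < 0; i++) {   /* asm: "add $24 ...; js 1b"    */
            d[i] = (s[3 * i] + s[3 * i + 1] + s[3 * i + 2]) / 3;
        }
    }
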
@@ -2024,23 +2038,23 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
        assert(filterSize % 4 == 0 && filterSize>0);
        if(filterSize==4) // allways true for upscaling, sometimes for down too
        {
-               int counter= -2*dstW;
+               long counter= -2*dstW;
                filter-= counter*2;
                filterPos-= counter/2;
                dst-= counter/2;
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
                        "movq "MANGLE(w02)", %%mm6      \n\t"
-                       "pushl %%ebp                    \n\t" // we use 7 regs here ...
-                       "movl %%eax, %%ebp              \n\t"
+                       "push %%"REG_BP"                \n\t" // we use 7 regs here ...
+                       "mov %%"REG_a", %%"REG_BP"      \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       "movzwl (%2, %%ebp), %%eax      \n\t"
-                       "movzwl 2(%2, %%ebp), %%ebx     \n\t"
-                       "movq (%1, %%ebp, 4), %%mm1     \n\t"
-                       "movq 8(%1, %%ebp, 4), %%mm3    \n\t"
-                       "movd (%3, %%eax), %%mm0        \n\t"
-                       "movd (%3, %%ebx), %%mm2        \n\t"
+                       "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+                       "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+                       "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
+                       "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
+                       "movd (%3, %%"REG_a"), %%mm0    \n\t"
+                       "movd (%3, %%"REG_b"), %%mm2    \n\t"
                        "punpcklbw %%mm7, %%mm0         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
                        "pmaddwd %%mm1, %%mm0           \n\t"
@@ -2050,44 +2064,44 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
                        "packssdw %%mm3, %%mm0          \n\t"
                        "pmaddwd %%mm6, %%mm0           \n\t"
                        "packssdw %%mm0, %%mm0          \n\t"
-                       "movd %%mm0, (%4, %%ebp)        \n\t"
-                       "addl $4, %%ebp                 \n\t"
+                       "movd %%mm0, (%4, %%"REG_BP")   \n\t"
+                       "add $4, %%"REG_BP"             \n\t"
                        " jnc 1b                        \n\t"
 
-                       "popl %%ebp                     \n\t"
+                       "pop %%"REG_BP"                 \n\t"
                        : "+a" (counter)
                        : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
-                       : "%ebx"
+                       : "%"REG_b
                );
        }
        else if(filterSize==8)
        {
-               int counter= -2*dstW;
+               long counter= -2*dstW;
                filter-= counter*4;
                filterPos-= counter/2;
                dst-= counter/2;
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
                        "movq "MANGLE(w02)", %%mm6      \n\t"
-                       "pushl %%ebp                    \n\t" // we use 7 regs here ...
-                       "movl %%eax, %%ebp              \n\t"
+                       "push %%"REG_BP"                \n\t" // we use 7 regs here ...
+                       "mov %%"REG_a", %%"REG_BP"      \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       "movzwl (%2, %%ebp), %%eax      \n\t"
-                       "movzwl 2(%2, %%ebp), %%ebx     \n\t"
-                       "movq (%1, %%ebp, 8), %%mm1     \n\t"
-                       "movq 16(%1, %%ebp, 8), %%mm3   \n\t"
-                       "movd (%3, %%eax), %%mm0        \n\t"
-                       "movd (%3, %%ebx), %%mm2        \n\t"
+                       "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+                       "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+                       "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
+                       "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
+                       "movd (%3, %%"REG_a"), %%mm0    \n\t"
+                       "movd (%3, %%"REG_b"), %%mm2    \n\t"
                        "punpcklbw %%mm7, %%mm0         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
                        "pmaddwd %%mm1, %%mm0           \n\t"
                        "pmaddwd %%mm2, %%mm3           \n\t"
 
-                       "movq 8(%1, %%ebp, 8), %%mm1    \n\t"
-                       "movq 24(%1, %%ebp, 8), %%mm5   \n\t"
-                       "movd 4(%3, %%eax), %%mm4       \n\t"
-                       "movd 4(%3, %%ebx), %%mm2       \n\t"
+                       "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
+                       "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
+                       "movd 4(%3, %%"REG_a"), %%mm4   \n\t"
+                       "movd 4(%3, %%"REG_b"), %%mm2   \n\t"
                        "punpcklbw %%mm7, %%mm4         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
                        "pmaddwd %%mm1, %%mm4           \n\t"
@@ -2100,19 +2114,19 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
                        "packssdw %%mm3, %%mm0          \n\t"
                        "pmaddwd %%mm6, %%mm0           \n\t"
                        "packssdw %%mm0, %%mm0          \n\t"
-                       "movd %%mm0, (%4, %%ebp)        \n\t"
-                       "addl $4, %%ebp                 \n\t"
+                       "movd %%mm0, (%4, %%"REG_BP")   \n\t"
+                       "add $4, %%"REG_BP"             \n\t"
                        " jnc 1b                        \n\t"
 
-                       "popl %%ebp                     \n\t"
+                       "pop %%"REG_BP"                 \n\t"
                        : "+a" (counter)
                        : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
-                       : "%ebx"
+                       : "%"REG_b
                );
        }
        else
        {
-               int counter= -2*dstW;
+               long counter= -2*dstW;
 //             filter-= counter*filterSize/2;
                filterPos-= counter/2;
                dst-= counter/2;
@@ -2121,42 +2135,42 @@ static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW
                        "movq "MANGLE(w02)", %%mm6      \n\t"
                        ".balign 16                     \n\t"
                        "1:                             \n\t"
-                       "movl %2, %%ecx                 \n\t"
-                       "movzwl (%%ecx, %0), %%eax      \n\t"
-                       "movzwl 2(%%ecx, %0), %%ebx     \n\t"
-                       "movl %5, %%ecx                 \n\t"
+                       "mov %2, %%"REG_c"              \n\t"
+                       "movzwl (%%"REG_c", %0), %%eax  \n\t"
+                       "movzwl 2(%%"REG_c", %0), %%ebx \n\t"
+                       "mov %5, %%"REG_c"              \n\t"
                        "pxor %%mm4, %%mm4              \n\t"
                        "pxor %%mm5, %%mm5              \n\t"
                        "2:                             \n\t"
                        "movq (%1), %%mm1               \n\t"
                        "movq (%1, %6), %%mm3           \n\t"
-                       "movd (%%ecx, %%eax), %%mm0     \n\t"
-                       "movd (%%ecx, %%ebx), %%mm2     \n\t"
+                       "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
+                       "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
                        "punpcklbw %%mm7, %%mm0         \n\t"
                        "punpcklbw %%mm7, %%mm2         \n\t"
                        "pmaddwd %%mm1, %%mm0           \n\t"
                        "pmaddwd %%mm2, %%mm3           \n\t"
                        "paddd %%mm3, %%mm5             \n\t"
                        "paddd %%mm0, %%mm4             \n\t"
-                       "addl $8, %1                    \n\t"
-                       "addl $4, %%ecx                 \n\t"
-                       "cmpl %4, %%ecx                 \n\t"
+                       "add $8, %1                     \n\t"
+                       "add $4, %%"REG_c"              \n\t"
+                       "cmp %4, %%"REG_c"              \n\t"
                        " jb 2b                         \n\t"
-                       "addl %6, %1                    \n\t"
+                       "add %6, %1                     \n\t"
                        "psrad $8, %%mm4                \n\t"
                        "psrad $8, %%mm5                \n\t"
                        "packssdw %%mm5, %%mm4          \n\t"
                        "pmaddwd %%mm6, %%mm4           \n\t"
                        "packssdw %%mm4, %%mm4          \n\t"
-                       "movl %3, %%eax                 \n\t"
-                       "movd %%mm4, (%%eax, %0)        \n\t"
-                       "addl $4, %0                    \n\t"
+                       "mov %3, %%"REG_a"              \n\t"
+                       "movd %%mm4, (%%"REG_a", %0)    \n\t"
+                       "add $4, %0                     \n\t"
                        " jnc 1b                        \n\t"
 
                        : "+r" (counter), "+r" (filter)
                        : "m" (filterPos), "m" (dst), "m"(src+filterSize),
-                         "m" (src), "r" (filterSize*2)
-                       : "%ebx", "%eax", "%ecx"
+                         "m" (src), "r" ((long)filterSize*2)
+                       : "%"REG_b, "%"REG_a, "%"REG_c
                );
        }
 #else
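
hScale loads its 16-bit filter positions with movzwl into the 32-bit register aliases even though the registers are used as 64-bit address indices right afterwards; on x86_64 any write to a 32-bit register clears the upper half of the full register, so the zero-extension to 64 bits comes for free. A small stand-alone illustration (hypothetical helper):

    #include <stdint.h>

    /* load a 16-bit filter position and use it as a 64-bit address index */
    static inline unsigned long load_filter_pos(const uint16_t *p)
    {
        unsigned long idx;
        __asm__("movzwl %1, %k0"   /* %k0 = 32-bit alias; upper half is cleared */
                : "=r"(idx)
                : "m"(*p));
        return idx;
    }
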
@@ -2241,28 +2255,28 @@ static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, in
     }
     else // Fast Bilinear upscale / crap downscale
     {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX2
        int i;
        if(canMMX2BeUsed)
        {
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
-                       "movl %0, %%ecx                 \n\t"
-                       "movl %1, %%edi                 \n\t"
-                       "movl %2, %%edx                 \n\t"
-                       "movl %3, %%ebx                 \n\t"
-                       "xorl %%eax, %%eax              \n\t" // i
-                       PREFETCH" (%%ecx)               \n\t"
-                       PREFETCH" 32(%%ecx)             \n\t"
-                       PREFETCH" 64(%%ecx)             \n\t"
+                       "mov %0, %%"REG_c"              \n\t"
+                       "mov %1, %%"REG_D"              \n\t"
+                       "mov %2, %%"REG_d"              \n\t"
+                       "mov %3, %%"REG_b"              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t" // i
+                       PREFETCH" (%%"REG_c")           \n\t"
+                       PREFETCH" 32(%%"REG_c")         \n\t"
+                       PREFETCH" 64(%%"REG_c")         \n\t"
 
 #define FUNNY_Y_CODE \
-                       "movl (%%ebx), %%esi            \n\t"\
+                       "mov (%%"REG_b"), %%"REG_S"     \n\t"\
                        "call *%4                       \n\t"\
-                       "addl (%%ebx, %%eax), %%ecx     \n\t"\
-                       "addl %%eax, %%edi              \n\t"\
-                       "xorl %%eax, %%eax              \n\t"\
+                       "addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+                       "add %%"REG_a", %%"REG_D"       \n\t"\
+                       "xor %%"REG_a", %%"REG_a"       \n\t"\
 
 FUNNY_Y_CODE
 FUNNY_Y_CODE
@@ -2275,7 +2289,7 @@ FUNNY_Y_CODE
 
                        :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
                        "m" (funnyYCode)
-                       : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+                       : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
                );
                for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
        }
@@ -2284,43 +2298,43 @@ FUNNY_Y_CODE
 #endif
        //NO MMX just normal asm ...
        asm volatile(
-               "xorl %%eax, %%eax              \n\t" // i
-               "xorl %%ebx, %%ebx              \n\t" // xx
+               "xor %%"REG_a", %%"REG_a"       \n\t" // i
+               "xor %%"REG_b", %%"REG_b"       \n\t" // xx
                "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               "movzbl  (%0, %%ebx), %%edi     \n\t" //src[xx]
-               "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
+               "movzbl  (%0, %%"REG_b"), %%edi \n\t" //src[xx]
+               "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-               "movl %1, %%edi                 \n\t"
+               "mov %1, %%"REG_D"              \n\t"
                "shrl $9, %%esi                 \n\t"
-               "movw %%si, (%%edi, %%eax, 2)   \n\t"
+               "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFF
-               "adcl %3, %%ebx                 \n\t" //xx+= xInc>>8 + carry
+               "adc %3, %%"REG_b"              \n\t" //xx+= xInc>>8 + carry
 
-               "movzbl (%0, %%ebx), %%edi      \n\t" //src[xx]
-               "movzbl 1(%0, %%ebx), %%esi     \n\t" //src[xx+1]
+               "movzbl (%0, %%"REG_b"), %%edi  \n\t" //src[xx]
+               "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-               "movl %1, %%edi                 \n\t"
+               "mov %1, %%"REG_D"              \n\t"
                "shrl $9, %%esi                 \n\t"
-               "movw %%si, 2(%%edi, %%eax, 2)  \n\t"
+               "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFF
-               "adcl %3, %%ebx                 \n\t" //xx+= xInc>>8 + carry
+               "adc %3, %%"REG_b"              \n\t" //xx+= xInc>>8 + carry
 
 
-               "addl $2, %%eax                 \n\t"
-               "cmpl %2, %%eax                 \n\t"
+               "add $2, %%"REG_a"              \n\t"
+               "cmp %2, %%"REG_a"              \n\t"
                " jb 1b                         \n\t"
 
 
                :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
-               : "%eax", "%ebx", "%ecx", "%edi", "%esi"
+               : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
                );
 #ifdef HAVE_MMX2
        } //if MMX2 can't be used
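
The plain-asm fallback of hyscale steps the source position in 16.16 fixed point: the low word of xInc accumulates in %cx, and the carry of that addw bumps the integer position held in REG_b via adc. A rough scalar equivalent of one such loop, with names taken from the asm operands (a sketch, not the file's actual C fallback):

    #include <stdint.h>

    /* approximate scalar equivalent of the bilinear luma loop above */
    static void hyscale_sketch(uint16_t *dst, long dstWidth,
                               const uint8_t *src, unsigned xInc)
    {
        unsigned xpos = 0;                      /* 16.16 fixed-point position   */
        for (long i = 0; i < dstWidth; i++) {
            unsigned xx   = xpos >> 16;         /* integer part: source index   */
            int      frac = xpos & 0xFFFF;      /* fraction, accumulated in %cx */
            dst[i] = (uint16_t)(((src[xx] << 16)
                                 + (src[xx + 1] - src[xx]) * frac) >> 9);
            xpos += xInc;
        }
    }
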
@@ -2410,40 +2424,40 @@ inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, u
     }
     else // Fast Bilinear upscale / crap downscale
     {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 #ifdef HAVE_MMX2
        int i;
        if(canMMX2BeUsed)
        {
                asm volatile(
                        "pxor %%mm7, %%mm7              \n\t"
-                       "movl %0, %%ecx                 \n\t"
-                       "movl %1, %%edi                 \n\t"
-                       "movl %2, %%edx                 \n\t"
-                       "movl %3, %%ebx                 \n\t"
-                       "xorl %%eax, %%eax              \n\t" // i
-                       PREFETCH" (%%ecx)               \n\t"
-                       PREFETCH" 32(%%ecx)             \n\t"
-                       PREFETCH" 64(%%ecx)             \n\t"
+                       "mov %0, %%"REG_c"              \n\t"
+                       "mov %1, %%"REG_D"              \n\t"
+                       "mov %2, %%"REG_d"              \n\t"
+                       "mov %3, %%"REG_b"              \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t" // i
+                       PREFETCH" (%%"REG_c")           \n\t"
+                       PREFETCH" 32(%%"REG_c")         \n\t"
+                       PREFETCH" 64(%%"REG_c")         \n\t"
 
 #define FUNNY_UV_CODE \
-                       "movl (%%ebx), %%esi            \n\t"\
+                       "movl (%%"REG_b"), %%esi        \n\t"\
                        "call *%4                       \n\t"\
-                       "addl (%%ebx, %%eax), %%ecx     \n\t"\
-                       "addl %%eax, %%edi              \n\t"\
-                       "xorl %%eax, %%eax              \n\t"\
+                       "addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+                       "add %%"REG_a", %%"REG_D"       \n\t"\
+                       "xor %%"REG_a", %%"REG_a"       \n\t"\
 
 FUNNY_UV_CODE
 FUNNY_UV_CODE
 FUNNY_UV_CODE
 FUNNY_UV_CODE
-                       "xorl %%eax, %%eax              \n\t" // i
-                       "movl %5, %%ecx                 \n\t" // src
-                       "movl %1, %%edi                 \n\t" // buf1
-                       "addl $4096, %%edi              \n\t"
-                       PREFETCH" (%%ecx)               \n\t"
-                       PREFETCH" 32(%%ecx)             \n\t"
-                       PREFETCH" 64(%%ecx)             \n\t"
+                       "xor %%"REG_a", %%"REG_a"       \n\t" // i
+                       "mov %5, %%"REG_c"              \n\t" // src
+                       "mov %1, %%"REG_D"              \n\t" // buf1
+                       "add $4096, %%"REG_D"           \n\t"
+                       PREFETCH" (%%"REG_c")           \n\t"
+                       PREFETCH" 32(%%"REG_c")         \n\t"
+                       PREFETCH" 64(%%"REG_c")         \n\t"
 
 FUNNY_UV_CODE
 FUNNY_UV_CODE
@@ -2452,7 +2466,7 @@ FUNNY_UV_CODE
 
                        :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
                        "m" (funnyUVCode), "m" (src2)
-                       : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+                       : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%esi", "%"REG_D
                );
                for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
                {
@@ -2465,41 +2479,41 @@ FUNNY_UV_CODE
        {
 #endif
        asm volatile(
-               "xorl %%eax, %%eax              \n\t" // i
-               "xorl %%ebx, %%ebx              \n\t" // xx
+               "xor %%"REG_a", %%"REG_a"       \n\t" // i
+               "xor %%"REG_b", %%"REG_b"       \n\t" // xx
                "xorl %%ecx, %%ecx              \n\t" // 2*xalpha
                ".balign 16                     \n\t"
                "1:                             \n\t"
-               "movl %0, %%esi                 \n\t"
-               "movzbl  (%%esi, %%ebx), %%edi  \n\t" //src[xx]
-               "movzbl 1(%%esi, %%ebx), %%esi  \n\t" //src[xx+1]
+               "mov %0, %%"REG_S"              \n\t"
+               "movzbl  (%%"REG_S", %%"REG_b"), %%edi  \n\t" //src[xx]
+               "movzbl 1(%%"REG_S", %%"REG_b"), %%esi  \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-               "movl %1, %%edi                 \n\t"
+               "mov %1, %%"REG_D"              \n\t"
                "shrl $9, %%esi                 \n\t"
-               "movw %%si, (%%edi, %%eax, 2)   \n\t"
+               "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
 
-               "movzbl  (%5, %%ebx), %%edi     \n\t" //src[xx]
-               "movzbl 1(%5, %%ebx), %%esi     \n\t" //src[xx+1]
+               "movzbl  (%5, %%"REG_b"), %%edi \n\t" //src[xx]
+               "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
                "subl %%edi, %%esi              \n\t" //src[xx+1] - src[xx]
                "imull %%ecx, %%esi             \n\t" //(src[xx+1] - src[xx])*2*xalpha
                "shll $16, %%edi                \n\t"
                "addl %%edi, %%esi              \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
-               "movl %1, %%edi                 \n\t"
+               "mov %1, %%"REG_D"              \n\t"
                "shrl $9, %%esi                 \n\t"
-               "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
+               "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
 
                "addw %4, %%cx                  \n\t" //2*xalpha += xInc&0xFF
-               "adcl %3, %%ebx                 \n\t" //xx+= xInc>>8 + carry
-               "addl $1, %%eax                 \n\t"
-               "cmpl %2, %%eax                 \n\t"
+               "adc %3, %%"REG_b"              \n\t" //xx+= xInc>>8 + carry
+               "add $1, %%"REG_a"              \n\t"
+               "cmp %2, %%"REG_a"              \n\t"
                " jb 1b                         \n\t"
 
-               :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
+               :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" ((long)(xInc>>16)), "m" ((xInc&0xFFFF)),
                "r" (src2)
-               : "%eax", "%ebx", "%ecx", "%edi", "%esi"
+               : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
                );
 #ifdef HAVE_MMX2
        } //if MMX2 can't be used
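
hcscale's operand list gains (long) casts because several of its "m" operands are now read by cmp/adc against a 64-bit register; the assembler sizes the memory access after the register side, so the C object backing the operand must be 8 bytes wide on x86_64. A minimal illustration of the rule (hypothetical helper):

    #include <stdint.h>

    /* widen an int before exposing it as an "m" operand to 64-bit asm */
    static long widen_for_asm(int dstWidth)
    {
        long w = dstWidth;       /* same effect as the (long)dstWidth cast above */
        long out;
        __asm__("mov %1, %0"     /* operand size follows %0: 8 bytes on x86_64   */
                : "=r"(out)
                : "m"(w));
        return out;
    }
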
diff --git a/postproc/yuv2rgb.c b/postproc/yuv2rgb.c
index 317ada9..d8a31ef 100644 (file)
--- a/postproc/yuv2rgb.c
+++ b/postproc/yuv2rgb.c
@@ -156,7 +156,7 @@ const uint8_t  __attribute__((aligned(8))) dither_8x8_220[8][8]={
 };
 #endif
 
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
 
 /* hope these constant values are cache line aligned */
 uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
@@ -183,14 +183,12 @@ uint64_t __attribute__((aligned(8))) dither8[2]={
        0x0004000400040004LL,};
 
 #undef HAVE_MMX
-#undef ARCH_X86
 
 //MMX versions
 #undef RENAME
 #define HAVE_MMX
 #undef HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX
 #include "yuv2rgb_template.c"
 
@@ -199,7 +197,6 @@ uint64_t __attribute__((aligned(8))) dither8[2]={
 #define HAVE_MMX
 #define HAVE_MMX2
 #undef HAVE_3DNOW
-#define ARCH_X86
 #define RENAME(a) a ## _MMX2
 #include "yuv2rgb_template.c"
 
@@ -583,7 +580,7 @@ EPILOG(1)
 
 SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
 {
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
     if(c->flags & SWS_CPU_CAPS_MMX2){
        switch(c->dstFormat){
        case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
diff --git a/postproc/yuv2rgb_template.c b/postproc/yuv2rgb_template.c
index 4b81c7e..2f15931 100644 (file)
--- a/postproc/yuv2rgb_template.c
+++ b/postproc/yuv2rgb_template.c
@@ -143,7 +143,7 @@ static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStr
        uint8_t *_py = src[0] + y*srcStride[0];
        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-       int index= -h_size/2;
+       long index= -h_size/2;
 
        b5Dither= dither8[y&1];
        g6Dither= dither4[y&1];
@@ -204,8 +204,8 @@ YUV2RGB
 
                     MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
                     
-                    "addl $16, %1                      \n\t"
-                    "addl $4, %0                       \n\t"
+                    "add $16, %1                       \n\t"
+                    "add $4, %0                        \n\t"
                     " js 1b                            \n\t"
                     
                     : "+r" (index), "+r" (_image)
@@ -238,7 +238,7 @@ static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStr
        uint8_t *_py = src[0] + y*srcStride[0];
        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-       int index= -h_size/2;
+       long index= -h_size/2;
 
        b5Dither= dither8[y&1];
        g6Dither= dither4[y&1];
@@ -295,8 +295,8 @@ YUV2RGB
 
                     MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
                     
-                    "addl $16, %1                      \n\t"
-                    "addl $4, %0                       \n\t"
+                    "add $16, %1                       \n\t"
+                    "add $4, %0                        \n\t"
                     " js 1b                            \n\t"
                     : "+r" (index), "+r" (_image)
                     : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
@@ -326,7 +326,7 @@ static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStr
        uint8_t *_py = src[0] + y*srcStride[0];
        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-       int index= -h_size/2;
+       long index= -h_size/2;
 
            /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
               pixels in each iteration */
@@ -440,8 +440,8 @@ YUV2RGB
                        "pxor %%mm4, %%mm4              \n\t"
 #endif
                     
-                    "addl $24, %1                      \n\t"
-                    "addl $4, %0                       \n\t"
+                    "add $24, %1                       \n\t"
+                    "add $4, %0                        \n\t"
                     " js 1b                            \n\t"
                     
                     : "+r" (index), "+r" (_image)
@@ -472,7 +472,7 @@ static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStr
        uint8_t *_py = src[0] + y*srcStride[0];
        uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
        uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
-       int index= -h_size/2;
+       long index= -h_size/2;
 
            /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
               pixels in each iteration */
@@ -526,8 +526,8 @@ YUV2RGB
                     "pxor %%mm4, %%mm4;" /* zero mm4 */
                     "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
 
-                    "addl $32, %1                      \n\t"
-                    "addl $4, %0                       \n\t"
+                    "add $32, %1                       \n\t"
+                    "add $4, %0                        \n\t"
                     " js 1b                            \n\t"
                     
                     : "+r" (index), "+r" (_image)
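
In the yuv2rgb template the per-row counter becomes long for an addressing-mode reason as well: index appears inside effective addresses such as 8 (%5, %0, 2), and x86_64 does not allow a 32-bit index register to be combined with a 64-bit base, so the "+r" (index) operand has to live in a full-width register. A self-contained sketch of the pattern (hypothetical function, a trivial load standing in for the pixel math):

    #include <stdint.h>

    /* negative 64-bit index walked up to zero, as in the loops above;
     * py_end points one past the row so py_end[2*index] stays in range */
    static void walk_row(const uint8_t *py_end, long h_size)
    {
        if (h_size <= 0)
            return;
        long index = -h_size / 2;          /* was int; must match pointer width */
        __asm__ volatile(
            "1:                          \n\t"
            "movzbl (%1, %0, 2), %%eax   \n\t"  /* stand-in for the Y fetch */
            "add $1, %0                  \n\t"
            "js 1b                       \n\t"
            : "+r"(index)
            : "r"(py_end)
            : "%eax", "memory");
    }
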