sse16 & pix_norm1 optimization patch by Felix von Leitner <felix-ffmpeg at fefe dot de>

author    Felix von Leitner <felix-ffmpeg@fefe.de>
          Sat, 11 Jan 2003 22:32:56 +0000 (22:32 +0000)
committer Michael Niedermayer <michaelni@gmx.at>
          Sat, 11 Jan 2003 22:32:56 +0000 (22:32 +0000)
Originally committed as revision 1450 to svn://svn.ffmpeg.org/ffmpeg/trunk

libavcodec/dsputil.c
libavcodec/i386/dsputil_mmx.c

libavcodec/dsputil.c
index 80e9ea1470b512e32a247a3a8d7da7eb8402c43a..fb6399700d2822b93abb4cd10854c882e8620823 100644
@@ -129,6 +129,7 @@ static int pix_norm1_c(UINT8 * pix, int line_size)
     s = 0;
     for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j += 8) {
+#if 0
            s += sq[pix[0]];
            s += sq[pix[1]];
            s += sq[pix[2]];
@@ -137,6 +138,30 @@ static int pix_norm1_c(UINT8 * pix, int line_size)
            s += sq[pix[5]];
            s += sq[pix[6]];
            s += sq[pix[7]];
+#else
+#if LONG_MAX > 2147483647
+           register uint64_t x=*(uint64_t*)pix;
+           s += sq[x&0xff];
+           s += sq[(x>>8)&0xff];
+           s += sq[(x>>16)&0xff];
+           s += sq[(x>>24)&0xff];
+           s += sq[(x>>32)&0xff];
+           s += sq[(x>>40)&0xff];
+           s += sq[(x>>48)&0xff];
+           s += sq[(x>>56)&0xff];
+#else
+           register uint32_t x=*(uint32_t*)pix;
+           s += sq[x&0xff];
+           s += sq[(x>>8)&0xff];
+           s += sq[(x>>16)&0xff];
+           s += sq[(x>>24)&0xff];
+           x=*(uint32_t*)(pix+4);
+           s += sq[x&0xff];
+           s += sq[(x>>8)&0xff];
+           s += sq[(x>>16)&0xff];
+           s += sq[(x>>24)&0xff];
+#endif
+#endif
            pix += 8;
        }
        pix += line_size - 16;
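
The C fallback above trades eight byte loads per group of pixels for a single word-sized load, extracting each pixel with shifts and masks; the LONG_MAX test selects the 64-bit variant where long is 64 bits wide and falls back to two 32-bit loads otherwise. Byte order does not matter here, since every byte is squared and summed regardless of its position. A minimal standalone sketch of the same idea (pix_norm1_word is a hypothetical name; sq is assumed to point at squareTbl + 256 as set up in dsputil.c, and memcpy replaces the type-punned cast so the load is alignment- and aliasing-safe):

    #include <stdint.h>
    #include <string.h>

    extern uint32_t *sq;   /* assumed: squareTbl + 256, table of squares */

    static int pix_norm1_word(const uint8_t *pix, int line_size)
    {
        int s = 0, i, j, k;
        for (i = 0; i < 16; i++) {
            for (j = 0; j < 16; j += 8) {
                uint64_t x;
                memcpy(&x, pix, 8);        /* one 8-byte load */
                for (k = 0; k < 8; k++) {
                    s += sq[x & 0xff];     /* square of the low byte */
                    x >>= 8;               /* shift the next pixel down */
                }
                pix += 8;
            }
            pix += line_size - 16;
        }
        return s;
    }

The *(uint64_t*)pix cast in the patch compiles to the same single load on x86 but is formally undefined for misaligned or type-punned accesses; the memcpy form expresses the load portably.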
@@ -174,6 +199,38 @@ static int sse16_c(void *v, UINT8 * pix1, UINT8 * pix2, int line_size)
     s = 0;
     for (i = 0; i < 16; i++) {
         for (j = 0; j < 16; j += 8) {
+#if 1
+#if LONG_MAX > 2147483647
+           uint64_t x,y;
+           x=*(uint64_t*)pix1;
+           y=*(uint64_t*)pix2;
+
+           s += sq[(x&0xff) - (y&0xff)];
+           s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
+           s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
+           s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
+           s += sq[((x>>32)&0xff) - ((y>>32)&0xff)];
+           s += sq[((x>>40)&0xff) - ((y>>40)&0xff)];
+           s += sq[((x>>48)&0xff) - ((y>>48)&0xff)];
+           s += sq[((x>>56)&0xff) - ((y>>56)&0xff)];
+#else
+           uint32_t x,y;
+           x=*(uint32_t*)pix1;
+           y=*(uint32_t*)pix2;
+
+           s += sq[(x&0xff) - (y&0xff)];
+           s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
+           s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
+           s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
+
+           x=*(uint32_t*)(pix1+4);
+           y=*(uint32_t*)(pix2+4);
+           s += sq[(x&0xff) - (y&0xff)];
+           s += sq[((x>>8)&0xff) - ((y>>8)&0xff)];
+           s += sq[((x>>16)&0xff) - ((y>>16)&0xff)];
+           s += sq[((x>>24)&0xff) - ((y>>24)&0xff)];
+#endif
+#else
             s += sq[pix1[0] - pix2[0]];
             s += sq[pix1[1] - pix2[1]];
             s += sq[pix1[2] - pix2[2]];
@@ -182,6 +239,7 @@ static int sse16_c(void *v, UINT8 * pix1, UINT8 * pix2, int line_size)
             s += sq[pix1[5] - pix2[5]];
             s += sq[pix1[6] - pix2[6]];
             s += sq[pix1[7] - pix2[7]];
+#endif
             pix1 += 8;
             pix2 += 8;
         }
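
One subtlety in the sse16_c rewrite: sq points 256 entries into the 512-entry square table (squareTbl + 256 in dsputil.c), so a genuinely negative signed index would still land inside the table. The differences here are computed in unsigned arithmetic, though, so a difference of -3 becomes 2^64-3 on the uint64_t path, and the lookup only lands on sq[-3] because pointer arithmetic wraps modulo the address width. That is also why matching the integer width to LONG_MAX matters: a wrapped 32-bit difference zero-extended on a 64-bit host would index far out of bounds. For sanity-checking, a plain reference with the subtraction done in signed int sidesteps the whole question (sse16_ref is a hypothetical name, not part of the patch):

    static int sse16_ref(const uint8_t *pix1, const uint8_t *pix2,
                         int line_size)
    {
        int s = 0, i, j;
        for (i = 0; i < 16; i++) {
            for (j = 0; j < 16; j++) {
                int d = pix1[j] - pix2[j];   /* signed, -255..255 */
                s += d * d;                  /* no table needed */
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return s;
    }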
libavcodec/i386/dsputil_mmx.c
index 595881f784ef442285a015e21a846d94938a0976..857f1d39851fbebdab9971950712baf15dc322a6 100644
@@ -485,6 +485,107 @@ static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
         dst[i+0] += src[i+0];
 }
 
+static int pix_norm1_mmx(uint8_t *pix, int line_size) {
+    int tmp;
+  asm volatile (
+      "movl $16,%%ecx\n"
+      "pxor %%mm0,%%mm0\n"
+      "pxor %%mm7,%%mm7\n"
+      "1:\n"
+      "movq (%0),%%mm2\n"      /* mm2 = pix[0-7] */
+      "movq 8(%0),%%mm3\n"     /* mm3 = pix[8-15] */
+
+      "movq %%mm2,%%mm1\n"     /* mm1 = mm2 = pix[0-7] */
+
+      "punpckhbw %%mm0,%%mm1\n"        /* mm1 = [pix4-7] */
+      "punpcklbw %%mm0,%%mm2\n"        /* mm2 = [pix0-3] */
+
+      "movq %%mm3,%%mm4\n"     /* mm4 = mm3 = pix[8-15] */
+      "punpckhbw %%mm0,%%mm3\n"        /* mm3 = [pix12-15] */
+      "punpcklbw %%mm0,%%mm4\n"        /* mm4 = [pix8-11] */
+
+      "pmaddwd %%mm1,%%mm1\n"  /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
+      "pmaddwd %%mm2,%%mm2\n"  /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
+
+      "pmaddwd %%mm3,%%mm3\n"
+      "pmaddwd %%mm4,%%mm4\n"
+
+      "paddd %%mm1,%%mm2\n"    /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
+                                         pix2^2+pix3^2+pix6^2+pix7^2) */
+      "paddd %%mm3,%%mm4\n"
+      "paddd %%mm2,%%mm7\n"
+
+      "addl %2, %0\n"
+      "paddd %%mm4,%%mm7\n"
+      "dec %%ecx\n"
+      "jnz 1b\n"
+
+      "movq %%mm7,%%mm1\n"
+      "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
+      "paddd %%mm7,%%mm1\n"
+      "movd %%mm1,%1\n"
+      : "+r" (pix), "=r"(tmp) : "r" (line_size) : "%ecx" );
+    return tmp;
+}
+
+static int sse16_mmx(void *v, UINT8 * pix1, UINT8 * pix2, int line_size) {
+    int tmp;
+  asm volatile (
+      "movl $16,%%ecx\n"
+      "pxor %%mm0,%%mm0\n"     /* mm0 = 0 */
+      "pxor %%mm7,%%mm7\n"     /* mm7 holds the sum */
+      "1:\n"
+      "movq (%0),%%mm1\n"      /* mm1 = pix1[0-7] */
+      "movq (%1),%%mm2\n"      /* mm2 = pix2[0-7] */
+      "movq 8(%0),%%mm3\n"     /* mm3 = pix1[8-15] */
+      "movq 8(%1),%%mm4\n"     /* mm4 = pix2[8-15] */
+
+      /* todo: mm1-mm2, mm3-mm4 */
+      /* algo: subtract mm1 from mm2 with saturation and vice versa */
+      /*       OR the results to get absolute difference */
+      "movq %%mm1,%%mm5\n"
+      "movq %%mm3,%%mm6\n"
+      "psubusb %%mm2,%%mm1\n"
+      "psubusb %%mm4,%%mm3\n"
+      "psubusb %%mm5,%%mm2\n"
+      "psubusb %%mm6,%%mm4\n"
+
+      "por %%mm1,%%mm2\n"
+      "por %%mm3,%%mm4\n"
+
+      /* now convert to 16-bit vectors so we can square them */
+      "movq %%mm2,%%mm1\n"
+      "movq %%mm4,%%mm3\n"
+
+      "punpckhbw %%mm0,%%mm2\n"
+      "punpckhbw %%mm0,%%mm4\n"
+      "punpcklbw %%mm0,%%mm1\n"        /* mm1 now spread over (mm1,mm2) */
+      "punpcklbw %%mm0,%%mm3\n"        /* mm4 now spread over (mm3,mm4) */
+
+      "pmaddwd %%mm2,%%mm2\n"
+      "pmaddwd %%mm4,%%mm4\n"
+      "pmaddwd %%mm1,%%mm1\n"
+      "pmaddwd %%mm3,%%mm3\n"
+
+      "addl %3,%0\n"
+      "addl %3,%1\n"
+
+      "paddd %%mm2,%%mm1\n"
+      "paddd %%mm4,%%mm3\n"
+      "paddd %%mm1,%%mm7\n"
+      "paddd %%mm3,%%mm7\n"
+
+      "decl %%ecx\n"
+      "jnz 1b\n"
+
+      "movq %%mm7,%%mm1\n"
+      "psrlq $32, %%mm7\n"     /* shift hi dword to lo */
+      "paddd %%mm7,%%mm1\n"
+      "movd %%mm1,%2\n"
+      : "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" (line_size) : "ecx");
+    return tmp;
+}
+
 static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
     int i=0;
     asm volatile(
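
Both MMX routines follow the same reduction pattern: punpcklbw/punpckhbw against the zeroed mm0 widen bytes to 16-bit words, pmaddwd squares each word and sums adjacent pairs into 32-bit lanes, paddd accumulates into mm7, and the epilogue folds mm7's two dwords into the scalar result. sse16_mmx additionally forms the bytewise absolute difference as (a -sat b) | (b -sat a) using the unsigned-saturating psubusb: whichever direction would go negative saturates to zero, and the OR keeps the magnitude. Since (a-b)^2 equals |a-b|^2, squaring the absolute difference gives the same sum as squaring the signed difference. A sketch of the same two tricks written with SSE2 intrinsics (not part of this patch; it handles a full 16-pixel row per load instead of two 8-pixel halves):

    #include <emmintrin.h>
    #include <stdint.h>

    static int sse16_sse2_sketch(const uint8_t *pix1, const uint8_t *pix2,
                                 int line_size)
    {
        __m128i zero = _mm_setzero_si128();
        __m128i acc  = _mm_setzero_si128();
        int i;
        for (i = 0; i < 16; i++) {
            __m128i a = _mm_loadu_si128((const __m128i *)pix1);
            __m128i b = _mm_loadu_si128((const __m128i *)pix2);
            /* |a-b|: the psubusb/por trick from the asm above */
            __m128i d = _mm_or_si128(_mm_subs_epu8(a, b),
                                     _mm_subs_epu8(b, a));
            /* widen to 16-bit words, then square-and-pair-sum (pmaddwd) */
            __m128i lo = _mm_unpacklo_epi8(d, zero);
            __m128i hi = _mm_unpackhi_epi8(d, zero);
            acc = _mm_add_epi32(acc, _mm_madd_epi16(lo, lo));
            acc = _mm_add_epi32(acc, _mm_madd_epi16(hi, hi));
            pix1 += line_size;
            pix2 += line_size;
        }
        /* fold the four 32-bit lanes, like the psrlq/paddd epilogue */
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return _mm_cvtsi128_si32(acc);
    }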
@@ -1416,6 +1517,9 @@ void dsputil_init_mmx(DSPContext* c, unsigned mask)
         
         c->sad[0]= sad16x16_mmx;
         c->sad[1]= sad8x8_mmx;
+
+        c->pix_norm1 = pix_norm1_mmx;
+        c->sse[0] = sse16_mmx;
         
         if (mm_flags & MM_MMXEXT) {
             c->pix_abs16x16     = pix_abs16x16_mmx2;
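
The init hunk only installs function pointers: callers go through DSPContext and transparently pick up the MMX versions once dsputil_init_mmx() has run. A hypothetical caller-side sketch (the dsputil_init signature is assumed to mirror the dsputil_init_mmx prototype above, and the unused first argument of sse[0] is passed as NULL since the C version shown earlier ignores it):

    DSPContext dsp;
    dsputil_init(&dsp, 0);   /* assumed: same (ctx, mask) shape as above */

    /* dispatches to pix_norm1_mmx / sse16_mmx when MMX is detected */
    int norm = dsp.pix_norm1(pix, line_size);
    int err  = dsp.sse[0](NULL, pix1, pix2, line_size);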