https://gerrit.chromium.org/gerrit/#/c/71007/
[modified]
commit 2695f18211e9b1017647af608a64d72a688ffbe7
Author: Jan Kratochvil <jan.kratochvil@redhat.com>
Date:   Mon Jul 28 20:07:38 2014 +0200

    Fix --as=nasm compatibility for new asm code.

    s/movd/movq/
    s/pmovmskb rX,/pmovmskb rXd,/
    Add end-of-line ':' label markers.

    Change-Id: Icc2c8b66af3cf72598361021699e099739f813d7
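For context, --as=nasm is libvpx's configure switch for selecting the assembler, so the fixes below target forms that yasm accepts but nasm rejects. The three yasm-isms can be reproduced outside the tree; the snippet that follows is an illustrative sketch only, not part of the patch, the file name and labels are made up, and the exact nasm diagnostics depend on the nasm version.

; assemble-only check, e.g. "nasm -f elf64 check.asm" (file name illustrative)
BITS 64
SECTION .text
check:
    pxor     xmm6, xmm6
    ;movd    rax, xmm6          ; yasm accepts this, nasm does not: a 64-bit GPR move needs movq
    movq     rax, xmm6          ; works with both assemblers
    pcmpeqb  xmm7, xmm7
    ;pmovmskb rax, xmm7         ; yasm-only; nasm wants a 32-bit destination register
    pmovmskb eax, xmm7          ; the byte mask fits in 32 bits either way
.done:                          ; the trailing ':' avoids nasm's orphan-labels warning
    ret

The same movd/movq constraint is why the vp9_subpel_variance.asm hunk below guards movq rax, m6 with ARCH_X86_64 and keeps movd eax, m6 for 32-bit builds.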
diff --git a/vp9/common/x86/vp9_subpixel_8t_ssse3.asm b/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
index fd781d4..57eee70 100644
--- a/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
+++ b/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
@@ -18,7 +18,7 @@
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
@@ -624,7 +624,7 @@ sym(vp9_filter_block1d16_v8_avg_ssse3):
pavgb xmm0, xmm1
%endif
movd [rdi], xmm0
-.done
+.done:
%endm
%macro HORIZx8_ROW 4
@@ -661,7 +661,7 @@ sym(vp9_filter_block1d16_v8_avg_ssse3):
mov rcx, 0x0400040
movdqa xmm4, [rdx] ;load filters
- movd xmm5, rcx
+ movq xmm5, rcx
packsswb xmm4, xmm4
pshuflw xmm0, xmm4, 0b ;k0_k1
pshuflw xmm1, xmm4, 01010101b ;k2_k3
@@ -727,7 +727,7 @@ sym(vp9_filter_block1d16_v8_avg_ssse3):
pavgb xmm0, xmm1
%endif
movq [rdi], xmm0
-.done
+.done:
%endm
%macro HORIZx16 1
diff --git a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
index 508e1d4..1c97342 100644
--- a/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
+++ b/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
@@ -122,8 +122,8 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
pcmpgtw m7, m6, m0 ; m7 = c[i] >= zbin
pcmpgtw m12, m11, m0 ; m12 = c[i] >= zbin
%ifidn %1, b_32x32
- pmovmskb r6, m7
- pmovmskb r2, m12
+ pmovmskb r6d, m7
+ pmovmskb r2d, m12
or r6, r2
jz .skip_iter
%endif
diff --git a/vp9/encoder/x86/vp9_subpel_variance.asm b/vp9/encoder/x86/vp9_subpel_variance.asm
index 1a9e4e8..e06e220 100644
--- a/vp9/encoder/x86/vp9_subpel_variance.asm
+++ b/vp9/encoder/x86/vp9_subpel_variance.asm
@@ -101,7 +101,6 @@ SECTION .text
pshufd m4, m6, 0x1
movd [r1], m7 ; store sse
paddd m6, m4
- movd rax, m6 ; store sum as return value
%else ; mmsize == 8
pshufw m4, m6, 0xe
pshufw m3, m7, 0xe
@@ -113,7 +112,11 @@ SECTION .text
movd [r1], m7 ; store sse
pshufw m4, m6, 0xe
paddd m6, m4
- movd rax, m6 ; store sum as return value
+%endif
+%if ARCH_X86_64
+ movq rax, m6 ; store sum as return value
+%else
+ movd eax, m6 ; store sum as return value
%endif
RET
%endmacro