|
|
02abd8 |
From f6a26d09257dde9cd41144120543c8b754de515f Mon Sep 17 00:00:00 2001
|
|
|
02abd8 |
From: Fernando Seiti Furusato <ferseiti@linux.vnet.ibm.com>
|
|
|
02abd8 |
Date: Thu, 25 Jun 2015 15:59:54 +0300
|
|
|
02abd8 |
Subject: [PATCH 2/5] vmx: adjust macros when loading vectors on ppc64le
|
|
|
02abd8 |
|
|
|
02abd8 |
Replaced usage of vec_lvsl with direct unaligned assignment
|
|
|
02abd8 |
operation (=). That is because, according to Power ABI Specification,
|
|
|
02abd8 |
the usage of lvsl is deprecated on ppc64le.
|
|
|
02abd8 |
|
|
|
02abd8 |
Changed COMPUTE_SHIFT_{MASK,MASKS,MASKC} macro usage to no-op for powerpc
|
|
|
02abd8 |
little endian since unaligned access is supported on ppc64le.
|
|
|
02abd8 |
|
|
|
02abd8 |
v2:
|
|
|
02abd8 |
|
|
|
02abd8 |
- replace _LITTLE_ENDIAN with WORDS_BIGENDIAN for consistency
|
|
|
02abd8 |
- fixed whitespaces and indentation issues
|
|
|
02abd8 |
|
|
|
02abd8 |
Signed-off-by: Fernando Seiti Furusato <ferseiti@linux.vnet.ibm.com>
|
|
|
02abd8 |
Reviewed-by: Adam Jackson <ajax@redhat.com>
|
|
|
02abd8 |
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
|
|
|
02abd8 |
Acked-by: Pekka Paalanen <pekka.paalanen@collabora.co.uk>
|
|
|
02abd8 |
---
|
|
|
02abd8 |
pixman/pixman-vmx.c | 25 +++++++++++++++++++++++++
|
|
|
02abd8 |
1 file changed, 25 insertions(+)
|
|
|
02abd8 |
|
|
|
02abd8 |
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
|
|
|
02abd8 |
index d0a4fc8..e33d9d9 100644
|
|
|
02abd8 |
--- a/pixman/pixman-vmx.c
|
|
|
02abd8 |
+++ b/pixman/pixman-vmx.c
|
|
|
02abd8 |
@@ -136,6 +136,7 @@ over (vector unsigned int src,
|
|
|
02abd8 |
over (pix_multiply (src, mask), \
|
|
|
02abd8 |
pix_multiply (srca, mask), dest)
|
|
|
02abd8 |
|
|
|
02abd8 |
+#ifdef WORDS_BIGENDIAN
|
|
|
02abd8 |
|
|
|
02abd8 |
#define COMPUTE_SHIFT_MASK(source) \
|
|
|
02abd8 |
source ## _mask = vec_lvsl (0, source);
|
|
|
02abd8 |
@@ -169,6 +170,30 @@ over (vector unsigned int src,
|
|
|
02abd8 |
v ## mask = (typeof(v ## mask)) \
|
|
|
02abd8 |
vec_perm (tmp1, tmp2, mask ## _mask);
|
|
|
02abd8 |
|
|
|
02abd8 |
+#else
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+/* Now the COMPUTE_SHIFT_{MASK, MASKS, MASKC} below are just no-ops.
|
|
|
02abd8 |
+ * They are defined that way because little endian altivec can do unaligned
|
|
|
02abd8 |
+ * reads natively and has no need for constructing the permutation pattern
|
|
|
02abd8 |
+ * variables.
|
|
|
02abd8 |
+ */
|
|
|
02abd8 |
+#define COMPUTE_SHIFT_MASK(source)
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+#define COMPUTE_SHIFT_MASKS(dest, source)
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+#define COMPUTE_SHIFT_MASKC(dest, source, mask)
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+# define LOAD_VECTORS(dest, source) \
|
|
|
02abd8 |
+ v ## source = *((typeof(v ## source)*)source); \
|
|
|
02abd8 |
+ v ## dest = *((typeof(v ## dest)*)dest);
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+# define LOAD_VECTORSC(dest, source, mask) \
|
|
|
02abd8 |
+ v ## source = *((typeof(v ## source)*)source); \
|
|
|
02abd8 |
+ v ## dest = *((typeof(v ## dest)*)dest); \
|
|
|
02abd8 |
+ v ## mask = *((typeof(v ## mask)*)mask);
|
|
|
02abd8 |
+
|
|
|
02abd8 |
+#endif /* WORDS_BIGENDIAN */
|
|
|
02abd8 |
+
|
|
|
02abd8 |
#define LOAD_VECTORSM(dest, source, mask) \
|
|
|
02abd8 |
LOAD_VECTORSC (dest, source, mask) \
|
|
|
02abd8 |
v ## source = pix_multiply (v ## source, \
|
|
|
02abd8 |
--
|
|
|
02abd8 |
2.4.3
|
|
|
02abd8 |
|