yeahuh / rpms / qemu-kvm

Forked from rpms/qemu-kvm 2 years ago
Clone

Blame SOURCES/kvm-crypto-convert-xts_tweak_encdec-to-use-xts_uint128-t.patch

b38b0f
From 8e7643e39fc0c539f55d85263e87f64dca52efea Mon Sep 17 00:00:00 2001
b38b0f
From: "Daniel P. Berrange" <berrange@redhat.com>
b38b0f
Date: Wed, 24 Apr 2019 09:56:39 +0100
b38b0f
Subject: [PATCH 5/9] crypto: convert xts_tweak_encdec to use xts_uint128 type
b38b0f
MIME-Version: 1.0
b38b0f
Content-Type: text/plain; charset=UTF-8
b38b0f
Content-Transfer-Encoding: 8bit
b38b0f
b38b0f
RH-Author: Daniel P. Berrange <berrange@redhat.com>
b38b0f
Message-id: <20190424095643.796-6-berrange@redhat.com>
b38b0f
Patchwork-id: 85883
b38b0f
O-Subject: [RHEL-8.1.0 qemu-kvm PATCH 5/9] crypto: convert xts_tweak_encdec to use xts_uint128 type
b38b0f
Bugzilla: 1680231
b38b0f
RH-Acked-by: Philippe Mathieu-Daudé <philmd@redhat.com>
b38b0f
RH-Acked-by: John Snow <jsnow@redhat.com>
b38b0f
RH-Acked-by: Eric Blake <eblake@redhat.com>
b38b0f
b38b0f
Using 64-bit arithmetic increases the performance for xts-aes-128
b38b0f
when built with gcrypt:
b38b0f
b38b0f
  Encrypt: 272 MB/s -> 355 MB/s
b38b0f
  Decrypt: 275 MB/s -> 362 MB/s
b38b0f
b38b0f
Reviewed-by: Alberto Garcia <berto@igalia.com>
b38b0f
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
b38b0f
(cherry picked from commit db217c69f0849add67cfa2cd6601c329398be12c)
b38b0f
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
b38b0f
---
b38b0f
 crypto/xts.c | 84 +++++++++++++++++++++++++++++++++++++++++-------------------
b38b0f
 1 file changed, 58 insertions(+), 26 deletions(-)
b38b0f
b38b0f
diff --git a/crypto/xts.c b/crypto/xts.c
b38b0f
index bee23f8..0ad231f 100644
b38b0f
--- a/crypto/xts.c
b38b0f
+++ b/crypto/xts.c
b38b0f
@@ -31,6 +31,13 @@ typedef union {
b38b0f
     uint64_t u[2];
b38b0f
 } xts_uint128;
b38b0f
 
b38b0f
+static inline void xts_uint128_xor(xts_uint128 *D,
b38b0f
+                                   const xts_uint128 *S1,
b38b0f
+                                   const xts_uint128 *S2)
b38b0f
+{
b38b0f
+    D->u[0] = S1->u[0] ^ S2->u[0];
b38b0f
+    D->u[1] = S1->u[1] ^ S2->u[1];
b38b0f
+}
b38b0f
 
b38b0f
 static void xts_mult_x(uint8_t *I)
b38b0f
 {
b38b0f
@@ -60,25 +67,19 @@ static void xts_mult_x(uint8_t *I)
b38b0f
  */
b38b0f
 static void xts_tweak_encdec(const void *ctx,
b38b0f
                              xts_cipher_func *func,
b38b0f
-                             const uint8_t *src,
b38b0f
-                             uint8_t *dst,
b38b0f
-                             uint8_t *iv)
b38b0f
+                             const xts_uint128 *src,
b38b0f
+                             xts_uint128 *dst,
b38b0f
+                             xts_uint128 *iv)
b38b0f
 {
b38b0f
-    unsigned long x;
b38b0f
-
b38b0f
     /* tweak encrypt block i */
b38b0f
-    for (x = 0; x < XTS_BLOCK_SIZE; x++) {
b38b0f
-        dst[x] = src[x] ^ iv[x];
b38b0f
-    }
b38b0f
+    xts_uint128_xor(dst, src, iv);
b38b0f
 
b38b0f
-    func(ctx, XTS_BLOCK_SIZE, dst, dst);
b38b0f
+    func(ctx, XTS_BLOCK_SIZE, dst->b, dst->b);
b38b0f
 
b38b0f
-    for (x = 0; x < XTS_BLOCK_SIZE; x++) {
b38b0f
-        dst[x] = dst[x] ^ iv[x];
b38b0f
-    }
b38b0f
+    xts_uint128_xor(dst, dst, iv);
b38b0f
 
b38b0f
     /* LFSR the tweak */
b38b0f
-    xts_mult_x(iv);
b38b0f
+    xts_mult_x(iv->b);
b38b0f
 }
b38b0f
 
b38b0f
 
b38b0f
@@ -110,20 +111,34 @@ void xts_decrypt(const void *datactx,
b38b0f
     /* encrypt the iv */
b38b0f
     encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
b38b0f
 
b38b0f
-    for (i = 0; i < lim; i++) {
b38b0f
-        xts_tweak_encdec(datactx, decfunc, src, dst, T.b);
b38b0f
-
b38b0f
-        src += XTS_BLOCK_SIZE;
b38b0f
-        dst += XTS_BLOCK_SIZE;
b38b0f
+    if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
b38b0f
+        QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
b38b0f
+        xts_uint128 *S = (xts_uint128 *)src;
b38b0f
+        xts_uint128 *D = (xts_uint128 *)dst;
b38b0f
+        for (i = 0; i < lim; i++, S++, D++) {
b38b0f
+            xts_tweak_encdec(datactx, decfunc, S, D, &T);
b38b0f
+        }
b38b0f
+    } else {
b38b0f
+        xts_uint128 D;
b38b0f
+
b38b0f
+        for (i = 0; i < lim; i++) {
b38b0f
+            memcpy(&D, src, XTS_BLOCK_SIZE);
b38b0f
+            xts_tweak_encdec(datactx, decfunc, &D, &D, &T);
b38b0f
+            memcpy(dst, &D, XTS_BLOCK_SIZE);
b38b0f
+            src += XTS_BLOCK_SIZE;
b38b0f
+            dst += XTS_BLOCK_SIZE;
b38b0f
+        }
b38b0f
     }
b38b0f
 
b38b0f
     /* if length is not a multiple of XTS_BLOCK_SIZE then */
b38b0f
     if (mo > 0) {
b38b0f
+        xts_uint128 S, D;
b38b0f
         memcpy(&CC, &T, XTS_BLOCK_SIZE);
b38b0f
         xts_mult_x(CC.b);
b38b0f
 
b38b0f
         /* PP = tweak decrypt block m-1 */
b38b0f
-        xts_tweak_encdec(datactx, decfunc, src, PP.b, CC.b);
b38b0f
+        memcpy(&S, src, XTS_BLOCK_SIZE);
b38b0f
+        xts_tweak_encdec(datactx, decfunc, &S, &PP, &CC);
b38b0f
 
b38b0f
         /* Pm = first length % XTS_BLOCK_SIZE bytes of PP */
b38b0f
         for (i = 0; i < mo; i++) {
b38b0f
@@ -135,7 +150,8 @@ void xts_decrypt(const void *datactx,
b38b0f
         }
b38b0f
 
b38b0f
         /* Pm-1 = Tweak uncrypt CC */
b38b0f
-        xts_tweak_encdec(datactx, decfunc, CC.b, dst, T.b);
b38b0f
+        xts_tweak_encdec(datactx, decfunc, &CC, &D, &T);
b38b0f
+        memcpy(dst, &D, XTS_BLOCK_SIZE);
b38b0f
     }
b38b0f
 
b38b0f
     /* Decrypt the iv back */
b38b0f
@@ -171,17 +187,32 @@ void xts_encrypt(const void *datactx,
b38b0f
     /* encrypt the iv */
b38b0f
     encfunc(tweakctx, XTS_BLOCK_SIZE, T.b, iv);
b38b0f
 
b38b0f
-    for (i = 0; i < lim; i++) {
b38b0f
-        xts_tweak_encdec(datactx, encfunc, src, dst, T.b);
b38b0f
+    if (QEMU_PTR_IS_ALIGNED(src, sizeof(uint64_t)) &&
b38b0f
+        QEMU_PTR_IS_ALIGNED(dst, sizeof(uint64_t))) {
b38b0f
+        xts_uint128 *S = (xts_uint128 *)src;
b38b0f
+        xts_uint128 *D = (xts_uint128 *)dst;
b38b0f
+        for (i = 0; i < lim; i++, S++, D++) {
b38b0f
+            xts_tweak_encdec(datactx, encfunc, S, D, &T);
b38b0f
+        }
b38b0f
+    } else {
b38b0f
+        xts_uint128 D;
b38b0f
+
b38b0f
+        for (i = 0; i < lim; i++) {
b38b0f
+            memcpy(&D, src, XTS_BLOCK_SIZE);
b38b0f
+            xts_tweak_encdec(datactx, encfunc, &D, &D, &T);
b38b0f
+            memcpy(dst, &D, XTS_BLOCK_SIZE);
b38b0f
 
b38b0f
-        dst += XTS_BLOCK_SIZE;
b38b0f
-        src += XTS_BLOCK_SIZE;
b38b0f
+            dst += XTS_BLOCK_SIZE;
b38b0f
+            src += XTS_BLOCK_SIZE;
b38b0f
+        }
b38b0f
     }
b38b0f
 
b38b0f
     /* if length is not a multiple of XTS_BLOCK_SIZE then */
b38b0f
     if (mo > 0) {
b38b0f
+        xts_uint128 S, D;
b38b0f
         /* CC = tweak encrypt block m-1 */
b38b0f
-        xts_tweak_encdec(datactx, encfunc, src, CC.b, T.b);
b38b0f
+        memcpy(&S, src, XTS_BLOCK_SIZE);
b38b0f
+        xts_tweak_encdec(datactx, encfunc, &S, &CC, &T);
b38b0f
 
b38b0f
         /* Cm = first length % XTS_BLOCK_SIZE bytes of CC */
b38b0f
         for (i = 0; i < mo; i++) {
b38b0f
@@ -194,7 +225,8 @@ void xts_encrypt(const void *datactx,
b38b0f
         }
b38b0f
 
b38b0f
         /* Cm-1 = Tweak encrypt PP */
b38b0f
-        xts_tweak_encdec(datactx, encfunc, PP.b, dst, T.b);
b38b0f
+        xts_tweak_encdec(datactx, encfunc, &PP, &D, &T);
b38b0f
+        memcpy(dst, &D, XTS_BLOCK_SIZE);
b38b0f
     }
b38b0f
 
b38b0f
     /* Decrypt the iv back */
b38b0f
-- 
b38b0f
1.8.3.1
b38b0f