Home > GIT Browse > SLE15-SP1
summary refs log tree commit diff
diff options
context:
space:
mode:
authorPetr Tesarik <ptesarik@suse.cz>2019-10-15 14:45:36 +0200
committerPetr Tesarik <ptesarik@suse.cz>2019-10-15 17:24:44 +0200
commitfec3515c5412c6fe679010872fc884ce79f9df13 (patch)
tree8999770a2434fa8a702dfac6ad03e7cdce3db910
parent1152ba51cbcf24d99bbdac299a0d8490d0a92de0 (diff)
s390/crypto: fix gcm-aes-s390 selftest failures (bsc#1137861
LTC#178091). [branch label: SLE15-SP1]
-rw-r--r--patches.suse/s390-crypto-fix-gcm-aes-s390-selftest-failures255
-rw-r--r--series.conf1
2 files changed, 256 insertions, 0 deletions
diff --git a/patches.suse/s390-crypto-fix-gcm-aes-s390-selftest-failures b/patches.suse/s390-crypto-fix-gcm-aes-s390-selftest-failures
new file mode 100644
index 0000000000..0e21377d05
--- /dev/null
+++ b/patches.suse/s390-crypto-fix-gcm-aes-s390-selftest-failures
@@ -0,0 +1,255 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Thu, 23 May 2019 16:18:25 +0200
+Subject: s390/crypto: fix gcm-aes-s390 selftest failures
+Git-commit: bef9f0ba300a55d79a69aa172156072182176515
+Patch-mainline: v5.2-rc1
+References: bsc#1137861 LTC#178091
+
+The current kernel uses improved crypto selftests. These
+tests showed that the current implementation of gcm-aes-s390
+is not able to deal with chunks of output buffers which are
+not a multiple of 16 bytes. This patch introduces a rework
+of the gcm aes s390 scatter walk handling which now is able
+to handle any input and output scatter list chunk sizes
+correctly.
+
+Code has been verified by the crypto selftests, the tcrypt
+kernel module and additional tests ran via the af_alg interface.
+
+Cc: <stable@vger.kernel.org>
+Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Patrick Steuer <steuer@linux.ibm.com>
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/crypto/aes_s390.c | 148 +++++++++++++++++++++++++++++++-------------
+ 1 file changed, 107 insertions(+), 41 deletions(-)
+
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -831,19 +831,45 @@ static int gcm_aes_setauthsize(struct cr
+ return 0;
+ }
+
+-static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+- unsigned int len)
++static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
++ unsigned int len)
+ {
+ memset(gw, 0, sizeof(*gw));
+ gw->walk_bytes_remain = len;
+ scatterwalk_start(&gw->walk, sg);
+ }
+
+-static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
++static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
++{
++ struct scatterlist *nextsg;
++
++ gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
++ while (!gw->walk_bytes) {
++ nextsg = sg_next(gw->walk.sg);
++ if (!nextsg)
++ return 0;
++ scatterwalk_start(&gw->walk, nextsg);
++ gw->walk_bytes = scatterwalk_clamp(&gw->walk,
++ gw->walk_bytes_remain);
++ }
++ gw->walk_ptr = scatterwalk_map(&gw->walk);
++ return gw->walk_bytes;
++}
++
++static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
++ unsigned int nbytes)
++{
++ gw->walk_bytes_remain -= nbytes;
++ scatterwalk_unmap(&gw->walk);
++ scatterwalk_advance(&gw->walk, nbytes);
++ scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
++ gw->walk_ptr = NULL;
++}
++
++static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+ {
+ int n;
+
+- /* minbytesneeded <= AES_BLOCK_SIZE */
+ if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+@@ -856,13 +882,11 @@ static int gcm_sg_walk_go(struct gcm_sg_
+ goto out;
+ }
+
+- gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+- if (!gw->walk_bytes) {
+- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+- gw->walk_bytes_remain);
++ if (!_gcm_sg_clamp_and_map(gw)) {
++ gw->ptr = NULL;
++ gw->nbytes = 0;
++ goto out;
+ }
+- gw->walk_ptr = scatterwalk_map(&gw->walk);
+
+ if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
+ gw->ptr = gw->walk_ptr;
+@@ -874,51 +898,90 @@ static int gcm_sg_walk_go(struct gcm_sg_
+ n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
+ memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+ gw->buf_bytes += n;
+- gw->walk_bytes_remain -= n;
+- scatterwalk_unmap(&gw->walk);
+- scatterwalk_advance(&gw->walk, n);
+- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+-
++ _gcm_sg_unmap_and_advance(gw, n);
+ if (gw->buf_bytes >= minbytesneeded) {
+ gw->ptr = gw->buf;
+ gw->nbytes = gw->buf_bytes;
+ goto out;
+ }
+-
+- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+- gw->walk_bytes_remain);
+- if (!gw->walk_bytes) {
+- scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+- gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+- gw->walk_bytes_remain);
++ if (!_gcm_sg_clamp_and_map(gw)) {
++ gw->ptr = NULL;
++ gw->nbytes = 0;
++ goto out;
+ }
+- gw->walk_ptr = scatterwalk_map(&gw->walk);
+ }
+
+ out:
+ return gw->nbytes;
+ }
+
+-static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
++static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+ {
+- int n;
++ if (gw->walk_bytes_remain == 0) {
++ gw->ptr = NULL;
++ gw->nbytes = 0;
++ goto out;
++ }
++
++ if (!_gcm_sg_clamp_and_map(gw)) {
++ gw->ptr = NULL;
++ gw->nbytes = 0;
++ goto out;
++ }
+
++ if (gw->walk_bytes >= minbytesneeded) {
++ gw->ptr = gw->walk_ptr;
++ gw->nbytes = gw->walk_bytes;
++ goto out;
++ }
++
++ scatterwalk_unmap(&gw->walk);
++ gw->walk_ptr = NULL;
++
++ gw->ptr = gw->buf;
++ gw->nbytes = sizeof(gw->buf);
++
++out:
++ return gw->nbytes;
++}
++
++static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
++{
+ if (gw->ptr == NULL)
+- return;
++ return 0;
+
+ if (gw->ptr == gw->buf) {
+- n = gw->buf_bytes - bytesdone;
++ int n = gw->buf_bytes - bytesdone;
+ if (n > 0) {
+ memmove(gw->buf, gw->buf + bytesdone, n);
+- gw->buf_bytes -= n;
++ gw->buf_bytes = n;
+ } else
+ gw->buf_bytes = 0;
+- } else {
+- gw->walk_bytes_remain -= bytesdone;
+- scatterwalk_unmap(&gw->walk);
+- scatterwalk_advance(&gw->walk, bytesdone);
+- scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+- }
++ } else
++ _gcm_sg_unmap_and_advance(gw, bytesdone);
++
++ return bytesdone;
++}
++
++static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
++{
++ int i, n;
++
++ if (gw->ptr == NULL)
++ return 0;
++
++ if (gw->ptr == gw->buf) {
++ for (i = 0; i < bytesdone; i += n) {
++ if (!_gcm_sg_clamp_and_map(gw))
++ return i;
++ n = min(gw->walk_bytes, bytesdone - i);
++ memcpy(gw->walk_ptr, gw->buf + i, n);
++ _gcm_sg_unmap_and_advance(gw, n);
++ }
++ } else
++ _gcm_sg_unmap_and_advance(gw, bytesdone);
++
++ return bytesdone;
+ }
+
+ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
+@@ -931,7 +994,7 @@ static int gcm_aes_crypt(struct aead_req
+ unsigned int pclen = req->cryptlen;
+ int ret = 0;
+
+- unsigned int len, in_bytes, out_bytes,
++ unsigned int n, len, in_bytes, out_bytes,
+ min_bytes, bytes, aad_bytes, pc_bytes;
+ struct gcm_sg_walk gw_in, gw_out;
+ u8 tag[GHASH_DIGEST_SIZE];
+@@ -968,14 +1031,14 @@ static int gcm_aes_crypt(struct aead_req
+ *(u32 *)(param.j0 + ivsize) = 1;
+ memcpy(param.k, ctx->key, ctx->key_len);
+
+- gcm_sg_walk_start(&gw_in, req->src, len);
+- gcm_sg_walk_start(&gw_out, req->dst, len);
++ gcm_walk_start(&gw_in, req->src, len);
++ gcm_walk_start(&gw_out, req->dst, len);
+
+ do {
+ min_bytes = min_t(unsigned int,
+ aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
+- in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
+- out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
++ in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
++ out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
+ bytes = min(in_bytes, out_bytes);
+
+ if (aadlen + pclen <= bytes) {
+@@ -1002,8 +1065,11 @@ static int gcm_aes_crypt(struct aead_req
+ gw_in.ptr + aad_bytes, pc_bytes,
+ gw_in.ptr, aad_bytes);
+
+- gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
+- gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
++ n = aad_bytes + pc_bytes;
++ if (gcm_in_walk_done(&gw_in, n) != n)
++ return -ENOMEM;
++ if (gcm_out_walk_done(&gw_out, n) != n)
++ return -ENOMEM;
+ aadlen -= aad_bytes;
+ pclen -= pc_bytes;
+ } while (aadlen + pclen > 0);
diff --git a/series.conf b/series.conf
index d04cb64afb..3eb0f024dc 100644
--- a/series.conf
+++ b/series.conf
@@ -48491,6 +48491,7 @@
patches.suse/s390-pci-fix-struct-definition-for-set-pci-function
patches.suse/s390-pci-fix-assignment-of-bus-resources
patches.suse/s390-zcrypt-fix-wrong-dispatching-for-control-domain-cprbs
+ patches.suse/s390-crypto-fix-gcm-aes-s390-selftest-failures
patches.suse/cifs-fix-memory-leak-of-pneg_inbuf-on-EOPNOTSUPP-ioctl-case.patch
patches.suse/fs-cifs-smb2pdu-c-fix-buffer-free-in-SMB2_ioctl_free.patch
patches.suse/dfs_cache-fix-a-wrong-use-of-kfree-in-flush_cache_ent-.patch