s390/crypto: cpacf function detection
authorMartin Schwidefsky <schwidefsky@de.ibm.com>
Thu, 18 Aug 2016 10:59:46 +0000 (12:59 +0200)
committerMartin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 29 Aug 2016 09:05:09 +0000 (11:05 +0200)
The CPACF code makes some assumptions about the availability of hardware
support. E.g. if the machine supports KM(AES-256) without chaining, it is
assumed that KMC(AES-256) with chaining is available as well. For the
existing CPUs this is true, but the architecturally correct way is to
check each CPACF function on its own. This is what the query function
of each instruction is all about.
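
A minimal sketch of the resulting detection pattern, distilled from the
aes_s390.c and ghash_s390.c changes below (example_init(), example_pick_fc()
and ghash_like_init() are illustrative names, not part of this patch): query
the function mask of an instruction once at module init, then test the exact
function code before using it and fall back if it is not installed.

  #include <linux/init.h>
  #include <asm/cpacf.h>

  static cpacf_mask_t km_functions;

  static int __init example_init(void)
  {
          /* One QUERY per instruction fills the 128-bit function mask. */
          cpacf_query(CPACF_KM, &km_functions);
          return 0;
  }

  static unsigned long example_pick_fc(unsigned int key_len)
  {
          unsigned long fc;

          /* Map the key length to a function code ... */
          fc = (key_len == 16) ? CPACF_KM_AES_128 :
               (key_len == 24) ? CPACF_KM_AES_192 :
               (key_len == 32) ? CPACF_KM_AES_256 : 0;

          /* ... and use it only if that exact function is installed;
           * a zero result tells the caller to take the software fallback.
           */
          return (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
  }

  static int __init ghash_like_init(void)
  {
          /* Single-function users can check one code directly. */
          if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
                  return -EOPNOTSUPP;
          return 0;
  }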

Reviewed-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/prng.c
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/s390/include/asm/cpacf.h
arch/s390/kvm/kvm-s390.c

index be87575270ff446b2eb64df0081b5d590a2453db..f4ad96ebb7e99996f0ff21c89893e9330237509a 100644 (file)
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
-#define AES_KEYLEN_128         1
-#define AES_KEYLEN_192         2
-#define AES_KEYLEN_256         4
-
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
-static char keylen_flag;
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
 struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
@@ -65,33 +62,6 @@ struct s390_xts_ctx {
        struct crypto_skcipher *fallback;
 };
 
-/*
- * Check if the key_len is supported by the HW.
- * Returns 0 if it is, a positive number if it is not and software fallback is
- * required or a negative number in case the key size is not valid
- */
-static int need_fallback(unsigned int key_len)
-{
-       switch (key_len) {
-       case 16:
-               if (!(keylen_flag & AES_KEYLEN_128))
-                       return 1;
-               break;
-       case 24:
-               if (!(keylen_flag & AES_KEYLEN_192))
-                       return 1;
-               break;
-       case 32:
-               if (!(keylen_flag & AES_KEYLEN_256))
-                       return 1;
-               break;
-       default:
-               return -1;
-               break;
-       }
-       return 0;
-}
-
 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
 {
@@ -115,72 +85,44 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-       u32 *flags = &tfm->crt_flags;
-       int ret;
+       unsigned long fc;
 
-       ret = need_fallback(key_len);
-       if (ret < 0) {
-               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-               return -EINVAL;
-       }
+       /* Pick the correct function code based on the key length */
+       fc = (key_len == 16) ? CPACF_KM_AES_128 :
+            (key_len == 24) ? CPACF_KM_AES_192 :
+            (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-       sctx->key_len = key_len;
-       if (!ret) {
-               memcpy(sctx->key, in_key, key_len);
-               return 0;
-       }
+       /* Check if the function code is available */
+       sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+       if (!sctx->fc)
+               return setkey_fallback_cip(tfm, in_key, key_len);
 
-       return setkey_fallback_cip(tfm, in_key, key_len);
+       sctx->key_len = key_len;
+       memcpy(sctx->key, in_key, key_len);
+       return 0;
 }
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-       if (unlikely(need_fallback(sctx->key_len))) {
+       if (unlikely(!sctx->fc)) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }
-
-       switch (sctx->key_len) {
-       case 16:
-               cpacf_km(CPACF_KM_AES_128,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       case 24:
-               cpacf_km(CPACF_KM_AES_192,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       case 32:
-               cpacf_km(CPACF_KM_AES_256,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       }
+       cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
 
-       if (unlikely(need_fallback(sctx->key_len))) {
+       if (unlikely(!sctx->fc)) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }
-
-       switch (sctx->key_len) {
-       case 16:
-               cpacf_km(CPACF_KM_AES_128 | CPACF_DECRYPT,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       case 24:
-               cpacf_km(CPACF_KM_AES_192 | CPACF_DECRYPT,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       case 32:
-               cpacf_km(CPACF_KM_AES_256 | CPACF_DECRYPT,
-                        &sctx->key, out, in, AES_BLOCK_SIZE);
-               break;
-       }
+       cpacf_km(sctx->fc | CPACF_DECRYPT,
+                &sctx->key, out, in, AES_BLOCK_SIZE);
 }
 
 static int fallback_init_cip(struct crypto_tfm *tfm)
@@ -289,27 +231,21 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-       int ret;
+       unsigned long fc;
 
-       ret = need_fallback(key_len);
-       if (ret > 0) {
-               sctx->key_len = key_len;
-               return setkey_fallback_blk(tfm, in_key, key_len);
-       }
+       /* Pick the correct function code based on the key length */
+       fc = (key_len == 16) ? CPACF_KM_AES_128 :
+            (key_len == 24) ? CPACF_KM_AES_192 :
+            (key_len == 32) ? CPACF_KM_AES_256 : 0;
 
-       switch (key_len) {
-       case 16:
-               sctx->fc = CPACF_KM_AES_128;
-               break;
-       case 24:
-               sctx->fc = CPACF_KM_AES_192;
-               break;
-       case 32:
-               sctx->fc = CPACF_KM_AES_256;
-               break;
-       }
+       /* Check if the function code is available */
+       sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+       if (!sctx->fc)
+               return setkey_fallback_blk(tfm, in_key, key_len);
 
-       return aes_set_key(tfm, in_key, key_len);
+       sctx->key_len = key_len;
+       memcpy(sctx->key, in_key, key_len);
+       return 0;
 }
 
 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
@@ -340,7 +276,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(need_fallback(sctx->key_len)))
+       if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -354,7 +290,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(need_fallback(sctx->key_len)))
+       if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -413,27 +349,21 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-       int ret;
+       unsigned long fc;
 
-       ret = need_fallback(key_len);
-       if (ret > 0) {
-               sctx->key_len = key_len;
-               return setkey_fallback_blk(tfm, in_key, key_len);
-       }
+       /* Pick the correct function code based on the key length */
+       fc = (key_len == 16) ? CPACF_KMC_AES_128 :
+            (key_len == 24) ? CPACF_KMC_AES_192 :
+            (key_len == 32) ? CPACF_KMC_AES_256 : 0;
 
-       switch (key_len) {
-       case 16:
-               sctx->fc = CPACF_KMC_AES_128;
-               break;
-       case 24:
-               sctx->fc = CPACF_KMC_AES_192;
-               break;
-       case 32:
-               sctx->fc = CPACF_KMC_AES_256;
-               break;
-       }
+       /* Check if the function code is available */
+       sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+       if (!sctx->fc)
+               return setkey_fallback_blk(tfm, in_key, key_len);
 
-       return aes_set_key(tfm, in_key, key_len);
+       sctx->key_len = key_len;
+       memcpy(sctx->key, in_key, key_len);
+       return 0;
 }
 
 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
@@ -476,7 +406,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(need_fallback(sctx->key_len)))
+       if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -490,7 +420,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(need_fallback(sctx->key_len)))
+       if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -582,33 +512,27 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
-       u32 *flags = &tfm->crt_flags;
+       unsigned long fc;
        int err;
 
        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;
 
-       switch (key_len) {
-       case 32:
-               xts_ctx->fc = CPACF_KM_XTS_128;
-               memcpy(xts_ctx->key + 16, in_key, 16);
-               memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
-               break;
-       case 48:
-               xts_ctx->fc = 0;
-               xts_fallback_setkey(tfm, in_key, key_len);
-               break;
-       case 64:
-               xts_ctx->fc = CPACF_KM_XTS_256;
-               memcpy(xts_ctx->key, in_key, 32);
-               memcpy(xts_ctx->pcc_key, in_key + 32, 32);
-               break;
-       default:
-               *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
-               return -EINVAL;
-       }
+       /* Pick the correct function code based on the key length */
+       fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+            (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+
+       /* Check if the function code is available */
+       xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+       if (!xts_ctx->fc)
+               return xts_fallback_setkey(tfm, in_key, key_len);
+
+       /* Split the XTS key into the two subkeys */
+       key_len = key_len / 2;
        xts_ctx->key_len = key_len;
+       memcpy(xts_ctx->key, in_key, key_len);
+       memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
        return 0;
 }
 
@@ -616,7 +540,7 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
                         struct s390_xts_ctx *xts_ctx,
                         struct blkcipher_walk *walk)
 {
-       unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
+       unsigned int offset = xts_ctx->key_len & 0x10;
        int ret = blkcipher_walk_virt(desc, walk);
        unsigned int nbytes = walk->nbytes;
        unsigned int n;
@@ -634,11 +558,11 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
-       memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+       memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
        /* remove decipher modifier bit from 'func' and call PCC */
        cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
 
-       memcpy(xts_param.key, xts_ctx->key, 32);
+       memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
        memcpy(xts_param.init, pcc_param.xts, 16);
        do {
                /* only use complete blocks */
@@ -662,7 +586,7 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(xts_ctx->key_len == 48))
+       if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -676,7 +600,7 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
-       if (unlikely(xts_ctx->key_len == 48))
+       if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);
 
        blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -735,20 +659,21 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+       unsigned long fc;
 
-       switch (key_len) {
-       case 16:
-               sctx->fc = CPACF_KMCTR_AES_128;
-               break;
-       case 24:
-               sctx->fc = CPACF_KMCTR_AES_192;
-               break;
-       case 32:
-               sctx->fc = CPACF_KMCTR_AES_256;
-               break;
-       }
+       /* Pick the correct function code based on the key length */
+       fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
+            (key_len == 24) ? CPACF_KMCTR_AES_192 :
+            (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
+
+       /* Check if the function code is available */
+       sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+       if (!sctx->fc)
+               return setkey_fallback_blk(tfm, in_key, key_len);
 
-       return aes_set_key(tfm, in_key, key_len);
+       sctx->key_len = key_len;
+       memcpy(sctx->key, in_key, key_len);
+       return 0;
 }
 
 static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
@@ -832,6 +757,9 @@ static int ctr_aes_encrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
+       if (unlikely(!sctx->fc))
+               return fallback_blk_enc(desc, dst, src, nbytes);
+
        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->fc, sctx, &walk);
 }
@@ -843,6 +771,9 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
 
+       if (unlikely(!sctx->fc))
+               return fallback_blk_dec(desc, dst, src, nbytes);
+
        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, sctx->fc | CPACF_DECRYPT, sctx, &walk);
 }
@@ -851,11 +782,14 @@ static struct crypto_alg ctr_aes_alg = {
        .cra_name               =       "ctr(aes)",
        .cra_driver_name        =       "ctr-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ctr */
-       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
+                                       CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       1,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
+       .cra_init               =       fallback_init_blk,
+       .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
@@ -893,43 +827,40 @@ static int __init aes_s390_init(void)
 {
        int ret;
 
-       if (cpacf_query(CPACF_KM, CPACF_KM_AES_128))
-               keylen_flag |= AES_KEYLEN_128;
-       if (cpacf_query(CPACF_KM, CPACF_KM_AES_192))
-               keylen_flag |= AES_KEYLEN_192;
-       if (cpacf_query(CPACF_KM, CPACF_KM_AES_256))
-               keylen_flag |= AES_KEYLEN_256;
-
-       if (!keylen_flag)
-               return -EOPNOTSUPP;
-
-       /* z9 109 and z9 BC/EC only support 128 bit key length */
-       if (keylen_flag == AES_KEYLEN_128)
-               pr_info("AES hardware acceleration is only available for"
-                       " 128-bit keys\n");
-
-       ret = aes_s390_register_alg(&aes_alg);
-       if (ret)
-               goto out_err;
+       /* Query available functions for KM, KMC and KMCTR */
+       cpacf_query(CPACF_KM, &km_functions);
+       cpacf_query(CPACF_KMC, &kmc_functions);
+       cpacf_query(CPACF_KMCTR, &kmctr_functions);
 
-       ret = aes_s390_register_alg(&ecb_aes_alg);
-       if (ret)
-               goto out_err;
+       if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
+           cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
+           cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
+               ret = aes_s390_register_alg(&aes_alg);
+               if (ret)
+                       goto out_err;
+               ret = aes_s390_register_alg(&ecb_aes_alg);
+               if (ret)
+                       goto out_err;
+       }
 
-       ret = aes_s390_register_alg(&cbc_aes_alg);
-       if (ret)
-               goto out_err;
+       if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
+           cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
+           cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
+               ret = aes_s390_register_alg(&cbc_aes_alg);
+               if (ret)
+                       goto out_err;
+       }
 
-       if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128) &&
-           cpacf_query(CPACF_KM, CPACF_KM_XTS_256)) {
+       if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
+           cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                ret = aes_s390_register_alg(&xts_aes_alg);
                if (ret)
                        goto out_err;
        }
 
-       if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128) &&
-           cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192) &&
-           cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256)) {
+       if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
+           cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
+           cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
index b77a546f1e763d0e544dbecdfeab3452e89e23b4..965587eefc3996d8d1b66f30dfb30700ced03d36 100644 (file)
@@ -27,6 +27,8 @@
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
 
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
 struct s390_des_ctx {
        u8 iv[DES_BLOCK_SIZE];
        u8 key[DES3_KEY_SIZE];
@@ -36,12 +38,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
                      unsigned int key_len)
 {
        struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-       u32 *flags = &tfm->crt_flags;
        u32 tmp[DES_EXPKEY_WORDS];
 
        /* check for weak keys */
-       if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-               *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+       if (!des_ekey(tmp, key) &&
+           (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }
 
@@ -238,13 +240,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
                       unsigned int key_len)
 {
        struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-       u32 *flags = &tfm->crt_flags;
 
        if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
            crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
                          DES_KEY_SIZE)) &&
-           (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
-               *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+           (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }
        memcpy(ctx->key, key, key_len);
@@ -554,39 +555,53 @@ static int __init des_s390_init(void)
 {
        int ret;
 
-       if (!cpacf_query(CPACF_KM, CPACF_KM_DEA) ||
-           !cpacf_query(CPACF_KM, CPACF_KM_TDEA_192))
-               return -EOPNOTSUPP;
-
-       ret = des_s390_register_alg(&des_alg);
-       if (ret)
-               goto out_err;
-       ret = des_s390_register_alg(&ecb_des_alg);
-       if (ret)
-               goto out_err;
-       ret = des_s390_register_alg(&cbc_des_alg);
-       if (ret)
-               goto out_err;
-       ret = des_s390_register_alg(&des3_alg);
-       if (ret)
-               goto out_err;
-       ret = des_s390_register_alg(&ecb_des3_alg);
-       if (ret)
-               goto out_err;
-       ret = des_s390_register_alg(&cbc_des3_alg);
-       if (ret)
-               goto out_err;
-
-       if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA) &&
-           cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192)) {
+       /* Query available functions for KM, KMC and KMCTR */
+       cpacf_query(CPACF_KM, &km_functions);
+       cpacf_query(CPACF_KMC, &kmc_functions);
+       cpacf_query(CPACF_KMCTR, &kmctr_functions);
+
+       if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
+               ret = des_s390_register_alg(&des_alg);
+               if (ret)
+                       goto out_err;
+               ret = des_s390_register_alg(&ecb_des_alg);
+               if (ret)
+                       goto out_err;
+       }
+       if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
+               ret = des_s390_register_alg(&cbc_des_alg);
+               if (ret)
+                       goto out_err;
+       }
+       if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
+               ret = des_s390_register_alg(&des3_alg);
+               if (ret)
+                       goto out_err;
+               ret = des_s390_register_alg(&ecb_des3_alg);
+               if (ret)
+                       goto out_err;
+       }
+       if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
+               ret = des_s390_register_alg(&cbc_des3_alg);
+               if (ret)
+                       goto out_err;
+       }
+
+       if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
+           cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
+       }
+
+       if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
                ret = des_s390_register_alg(&ctr_des_alg);
                if (ret)
                        goto out_err;
+       }
+       if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
                ret = des_s390_register_alg(&ctr_des3_alg);
                if (ret)
                        goto out_err;
index 8e87f51767994d70427681ea5d1504c602213c53..564616d48d8bd885ce31c232843f02bbed1d2b3f 100644 (file)
@@ -136,7 +136,7 @@ static struct shash_alg ghash_alg = {
 
 static int __init ghash_mod_init(void)
 {
-       if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
+       if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
                return -EOPNOTSUPP;
 
        return crypto_register_shash(&ghash_alg);
index bbf2af74c549856688201ff5e49ec5410c786a1a..79e3a1f6313a2516ed829eccdc1dba7f0e1b655f 100644 (file)
@@ -757,13 +757,13 @@ static int __init prng_init(void)
        int ret;
 
        /* check if the CPU has a PRNG */
-       if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
+       if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
                return -EOPNOTSUPP;
 
        /* choose prng mode */
        if (prng_mode != PRNG_MODE_TDES) {
                /* check for MSA5 support for PPNO operations */
-               if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
+               if (!cpacf_query_func(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
                        if (prng_mode == PRNG_MODE_SHA512) {
                                pr_err("The prng module cannot "
                                       "start in SHA-512 mode\n");
index 5fbf91bbb47870758925a93165e1e070f3553081..c7de53d8da7553d58c797d7d43de6b23101aadf6 100644 (file)
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
 
 static int __init sha1_s390_init(void)
 {
-       if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
+       if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
                return -EOPNOTSUPP;
        return crypto_register_shash(&alg);
 }
index 10aac0b119887aff4dc68d936b644def3b138086..53c277999a2866b3aea6e9cc412ab6fc55aeb124 100644 (file)
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
 {
        int ret;
 
-       if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
+       if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
                return -EOPNOTSUPP;
        ret = crypto_register_shash(&sha256_alg);
        if (ret < 0)
index ea85757be407353d5ee415e97f763be1737c5f58..2f4caa1ef123d1e6ce1bbf332ba045510e12ae07 100644 (file)
@@ -133,7 +133,7 @@ static int __init init(void)
 {
        int ret;
 
-       if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
+       if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
                return -EOPNOTSUPP;
        if ((ret = crypto_register_shash(&sha512_alg)) < 0)
                goto out;
index c226c9ba24acdb207093799d4e24149f9ffddb2e..2c680db7e5c10a20f9182c1232032e9db9f54740 100644 (file)
 #define CPACF_PPNO_SHA512_DRNG_GEN     0x03
 #define CPACF_PPNO_SHA512_DRNG_SEED    0x83
 
+typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+
 /**
  * cpacf_query() - check if a specific CPACF function is available
  * @opcode: the opcode of the crypto instruction
  *
  * Returns 1 if @func is available for @opcode, 0 otherwise
  */
-static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
+static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
-       typedef struct { unsigned char _[16]; } status_type;
        register unsigned long r0 asm("0") = 0; /* query function */
-       register unsigned long r1 asm("1") = (unsigned long) status;
+       register unsigned long r1 asm("1") = (unsigned long) mask;
 
        asm volatile(
                "       spm 0\n" /* pckmo doesn't change the cc */
                /* Parameter registers are ignored, but may not be 0 */
                "0:     .insn   rrf,%[opc] << 16,2,2,2,0\n"
                "       brc     1,0b\n" /* handle partial completion */
-               : "=m" (*(status_type *) status)
+               : "=m" (*mask)
                : [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
                : "cc");
 }
 
-static inline int cpacf_query(unsigned int opcode, unsigned int func)
+static inline int __cpacf_check_opcode(unsigned int opcode)
 {
-       unsigned char status[16];
-
        switch (opcode) {
        case CPACF_KMAC:
        case CPACF_KM:
        case CPACF_KMC:
        case CPACF_KIMD:
        case CPACF_KLMD:
-               if (!test_facility(17)) /* check for MSA */
-                       return 0;
-               break;
+               return test_facility(17);       /* check for MSA */
        case CPACF_PCKMO:
-               if (!test_facility(76)) /* check for MSA3 */
-                       return 0;
-               break;
+               return test_facility(76);       /* check for MSA3 */
        case CPACF_KMF:
        case CPACF_KMO:
        case CPACF_PCC:
        case CPACF_KMCTR:
-               if (!test_facility(77)) /* check for MSA4 */
-                       return 0;
-               break;
+               return test_facility(77);       /* check for MSA4 */
        case CPACF_PPNO:
-               if (!test_facility(57)) /* check for MSA5 */
-                       return 0;
-               break;
+               return test_facility(57);       /* check for MSA5 */
        default:
                BUG();
        }
-       __cpacf_query(opcode, status);
-       return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+{
+       if (__cpacf_check_opcode(opcode)) {
+               __cpacf_query(opcode, mask);
+               return 1;
+       }
+       memset(mask, 0, sizeof(*mask));
+       return 0;
+}
+
+static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
+{
+       return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+{
+       cpacf_mask_t mask;
+
+       if (cpacf_query(opcode, &mask))
+               return cpacf_test_func(&mask, func);
+       return 0;
 }
 
 /**
index fd2f1201204e23a8ba0344dedaf82ebff8c85482..d6e7e527f0bf3963dd8593b50969be0731ad345b 100644 (file)
@@ -245,22 +245,33 @@ static void kvm_s390_cpu_feat_init(void)
                     PTFF_QAF);
 
        if (test_facility(17)) { /* MSA */
-               __cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
-               __cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
-               __cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
-               __cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
-               __cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
+               __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kmac);
+               __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kmc);
+               __cpacf_query(CPACF_KM, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.km);
+               __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kimd);
+               __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.klmd);
        }
        if (test_facility(76)) /* MSA3 */
-               __cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
+               __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.pckmo);
        if (test_facility(77)) { /* MSA4 */
-               __cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
-               __cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
-               __cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
-               __cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
+               __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kmctr);
+               __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kmf);
+               __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.kmo);
+               __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
-               __cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
+               __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
+                             kvm_s390_available_subfunc.ppno);
 
        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);