crypto: inside-secure - move cache result dma mapping to request
Author:     Antoine Tenart <antoine.tenart@bootlin.com>
AuthorDate: Mon, 26 Feb 2018 13:45:11 +0000 (14:45 +0100)
Commit:     Herbert Xu <herbert@gondor.apana.org.au>
CommitDate: Fri, 9 Mar 2018 14:45:30 +0000 (22:45 +0800)
Under heavy traffic the cache DMA mapping gets overwritten by
concurrent requests, as the DMA address is stored in the per-tfm
context, which is shared by all requests on that tfm. This patch moves
this information into the per-request hash context so that one
request's mapping can't be overwritten by another's.

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel.h
drivers/crypto/inside-secure/safexcel_hash.c
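
For illustration, a minimal sketch of the race being fixed, using
simplified, hypothetical structures (shared_ctx and per_req are not the
driver's real types): when the DMA handle lives in a context shared by
every request on a tfm, a second request overwrites the first one's
mapping before it is unmapped, so completion unmaps the wrong address
and the original mapping leaks. Holding the handle in per-request state
ties each mapping's lifetime to its own request.

#include <linux/dma-mapping.h>

/* Hypothetical, simplified state: not the driver's real structs. */
struct shared_ctx {
	dma_addr_t cache_dma;	/* one slot shared by all requests: racy */
};

struct per_req {
	dma_addr_t cache_dma;	/* one slot per in-flight request: safe */
	unsigned int cache_sz;
};

/*
 * Racy pattern: request B can run this before request A completes,
 * clobbering A's handle. A's completion then unmaps B's buffer and
 * A's mapping is never released.
 */
static int send_racy(struct device *dev, struct shared_ctx *ctx,
		     void *cache, unsigned int len)
{
	ctx->cache_dma = dma_map_single(dev, cache, len, DMA_TO_DEVICE);
	return dma_mapping_error(dev, ctx->cache_dma) ? -EINVAL : 0;
}

/* Fixed pattern: the mapping lives and dies with its own request. */
static int send_safe(struct device *dev, struct per_req *req,
		     void *cache, unsigned int len)
{
	req->cache_dma = dma_map_single(dev, cache, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, req->cache_dma))
		return -EINVAL;
	req->cache_sz = len;
	return 0;
}

static void complete_safe(struct device *dev, struct per_req *req)
{
	if (req->cache_dma) {
		dma_unmap_single(dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_dma = 0;
	}
}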

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 0c33bdbe48fc3100e298275098fe254da90aa123..384b4ceb37f0e2ffe07229462641fd9e7757d8d0 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -537,20 +537,6 @@ finalize:
               EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 }
 
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-                          struct crypto_async_request *req)
-{
-       struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
-       if (ctx->cache) {
-               dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
-                                DMA_TO_DEVICE);
-               kfree(ctx->cache);
-               ctx->cache = NULL;
-               ctx->cache_sz = 0;
-       }
-}
-
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
        struct safexcel_command_desc *cdesc;
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 4e14c7e730c4eaec90206a7632b8916f82cac4c9..d8dff65fc311e25c6979392a3caba3b24ed37eef 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -578,11 +578,6 @@ struct safexcel_context {
        int ring;
        bool needs_inv;
        bool exit_inv;
-
-       /* Used for ahash requests */
-       void *cache;
-       dma_addr_t cache_dma;
-       unsigned int cache_sz;
 };
 
 /*
@@ -606,8 +601,6 @@ struct safexcel_inv_result {
 
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-                                 struct crypto_async_request *req);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring,
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index e33f089185d6dd7e191288576adb485645a44157..4953a2a86c1088c2ec765a742b786faedc1c3136 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -43,6 +43,9 @@ struct safexcel_ahash_req {
        u64 processed;
 
        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+       dma_addr_t cache_dma;
+       unsigned int cache_sz;
+
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
@@ -165,7 +168,11 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
                sreq->result_dma = 0;
        }
 
-       safexcel_free_context(priv, async);
+       if (sreq->cache_dma) {
+               dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+                                DMA_TO_DEVICE);
+               sreq->cache_dma = 0;
+       }
 
        cache_len = sreq->len - sreq->processed;
        if (cache_len)
@@ -227,24 +234,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
-               ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
-               if (!ctx->base.cache) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               memcpy(ctx->base.cache, req->cache, cache_len);
-               ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
-                                                    cache_len, DMA_TO_DEVICE);
-               if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
-                       ret = -EINVAL;
-                       goto free_cache;
-               }
+               req->cache_dma = dma_map_single(priv->dev, req->cache,
+                                               cache_len, DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->dev, req->cache_dma))
+                       return -EINVAL;
 
-               ctx->base.cache_sz = cache_len;
+               req->cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
-                                                ctx->base.cache_dma,
-                                                cache_len, len,
+                                                req->cache_dma, cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
@@ -328,16 +326,12 @@ cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 unmap_cache:
-       if (ctx->base.cache_dma) {
-               dma_unmap_single(priv->dev, ctx->base.cache_dma,
-                                ctx->base.cache_sz, DMA_TO_DEVICE);
-               ctx->base.cache_sz = 0;
+       if (req->cache_dma) {
+               dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+                                DMA_TO_DEVICE);
+               req->cache_sz = 0;
        }
-free_cache:
-       kfree(ctx->base.cache);
-       ctx->base.cache = NULL;
 
-unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
 }
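
As background on why per-request storage is available here: the ahash
API reserves driver-private room in every ahash_request, sized once at
tfm init via crypto_ahash_set_reqsize() and fetched with
ahash_request_ctx(). A minimal sketch of that pattern follows (the
function names are illustrative, not the driver's; the struct is the
safexcel_ahash_req shown above):

#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>

/*
 * Reserve per-request room for the driver's state at tfm creation, as
 * ahash drivers conventionally do. Every request allocated against
 * this tfm then carries its own safexcel_ahash_req.
 */
static int sketch_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}

/*
 * On completion, read the DMA handle back from the request's own
 * context and unmap exactly what this request mapped; concurrent
 * requests each see their own copy.
 */
static void sketch_unmap_cache(struct device *dev, struct ahash_request *areq)
{
	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);

	if (sreq->cache_dma) {
		dma_unmap_single(dev, sreq->cache_dma, sreq->cache_sz,
				 DMA_TO_DEVICE);
		sreq->cache_dma = 0;
	}
}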