crypto: inside-secure - use one queue per hw ring
author Antoine Ténart <antoine.tenart@free-electrons.com>
Thu, 15 Jun 2017 07:56:24 +0000 (09:56 +0200)
committer Herbert Xu <herbert@gondor.apana.org.au>
Tue, 20 Jun 2017 03:21:45 +0000 (11:21 +0800)
Update the inside-secure safexcel driver from using one global queue to
one queue per hw ring. This eases the request management and keeps the
hw in sync with what's done in sw.

Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel.h
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/inside-secure/safexcel_hash.c

index 73f4ef8d71f36df1d19a8411ab4c0bb32474e68a..8956b23803a8a62ad0de27e86ad655328d64fd91 100644 (file)
@@ -422,20 +422,18 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
        return 0;
 }
 
-void safexcel_dequeue(struct safexcel_crypto_priv *priv)
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        struct safexcel_request *request;
-       int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0};
-       int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0};
-       int commands, results;
+       int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
        do {
-               spin_lock_bh(&priv->lock);
-               req = crypto_dequeue_request(&priv->queue);
-               backlog = crypto_get_backlog(&priv->queue);
-               spin_unlock_bh(&priv->lock);
+               spin_lock_bh(&priv->ring[ring].queue_lock);
+               req = crypto_dequeue_request(&priv->ring[ring].queue);
+               backlog = crypto_get_backlog(&priv->ring[ring].queue);
+               spin_unlock_bh(&priv->ring[ring].queue_lock);
 
                if (!req)
                        goto finalize;
@@ -445,58 +443,51 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv)
                        goto requeue;
 
                ctx = crypto_tfm_ctx(req->tfm);
-               ret = ctx->send(req, ctx->ring, request, &commands, &results);
+               ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
 requeue:
-                       spin_lock_bh(&priv->lock);
-                       crypto_enqueue_request(&priv->queue, req);
-                       spin_unlock_bh(&priv->lock);
+                       spin_lock_bh(&priv->ring[ring].queue_lock);
+                       crypto_enqueue_request(&priv->ring[ring].queue, req);
+                       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-                       priv->need_dequeue = true;
+                       priv->ring[ring].need_dequeue = true;
                        continue;
                }
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
 
-               spin_lock_bh(&priv->ring[ctx->ring].egress_lock);
-               list_add_tail(&request->list, &priv->ring[ctx->ring].list);
-               spin_unlock_bh(&priv->ring[ctx->ring].egress_lock);
-
-               cdesc[ctx->ring] += commands;
-               rdesc[ctx->ring] += results;
+               spin_lock_bh(&priv->ring[ring].egress_lock);
+               list_add_tail(&request->list, &priv->ring[ring].list);
+               spin_unlock_bh(&priv->ring[ring].egress_lock);
 
-               nreq[ctx->ring]++;
-       } while (n++ < EIP197_MAX_BATCH_SZ);
+               cdesc += commands;
+               rdesc += results;
+       } while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-       if (n == EIP197_MAX_BATCH_SZ)
-               priv->need_dequeue = true;
-       else if (!n)
+       if (nreq == EIP197_MAX_BATCH_SZ)
+               priv->ring[ring].need_dequeue = true;
+       else if (!nreq)
                return;
 
-       for (i = 0; i < priv->config.rings; i++) {
-               if (!nreq[i])
-                       continue;
+       spin_lock_bh(&priv->ring[ring].lock);
 
-               spin_lock_bh(&priv->ring[i].lock);
+       /* Configure when we want an interrupt */
+       writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+              EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
+              priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
 
-               /* Configure when we want an interrupt */
-               writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
-                      EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]),
-                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH);
+       /* let the RDR know we have pending descriptors */
+       writel((rdesc * priv->config.rd_offset) << 2,
+              priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
 
-               /* let the RDR know we have pending descriptors */
-               writel((rdesc[i] * priv->config.rd_offset) << 2,
-                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+       /* let the CDR know we have pending descriptors */
+       writel((cdesc * priv->config.cd_offset) << 2,
+              priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
 
-               /* let the CDR know we have pending descriptors */
-               writel((cdesc[i] * priv->config.cd_offset) << 2,
-                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
-
-               spin_unlock_bh(&priv->ring[i].lock);
-       }
+       spin_unlock_bh(&priv->ring[ring].lock);
 }
 
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -638,9 +629,9 @@ static void safexcel_handle_result_work(struct work_struct *work)
 
        safexcel_handle_result_descriptor(priv, data->ring);
 
-       if (priv->need_dequeue) {
-               priv->need_dequeue = false;
-               safexcel_dequeue(data->priv);
+       if (priv->ring[data->ring].need_dequeue) {
+               priv->ring[data->ring].need_dequeue = false;
+               safexcel_dequeue(data->priv, data->ring);
        }
 }
 
@@ -864,17 +855,18 @@ static int safexcel_probe(struct platform_device *pdev)
                        goto err_clk;
                }
 
+               crypto_init_queue(&priv->ring[i].queue,
+                                 EIP197_DEFAULT_RING_SIZE);
+
                INIT_LIST_HEAD(&priv->ring[i].list);
                spin_lock_init(&priv->ring[i].lock);
                spin_lock_init(&priv->ring[i].egress_lock);
+               spin_lock_init(&priv->ring[i].queue_lock);
        }
 
        platform_set_drvdata(pdev, priv);
        atomic_set(&priv->ring_used, 0);
 
-       spin_lock_init(&priv->lock);
-       crypto_init_queue(&priv->queue, EIP197_DEFAULT_RING_SIZE);
-
        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "EIP h/w init failed (%d)\n", ret);
index 7e3cbb9ac98ef5a89737b3877a45e70732f0a704..abe0f59d147301aceeda4c0eeb1c933369457569 100644 (file)
@@ -469,11 +469,6 @@ struct safexcel_crypto_priv {
        struct clk *clk;
        struct safexcel_config config;
 
-       spinlock_t lock;
-       struct crypto_queue queue;
-
-       bool need_dequeue;
-
        /* context DMA pool */
        struct dma_pool *context_pool;
 
@@ -490,6 +485,11 @@ struct safexcel_crypto_priv {
                /* command/result rings */
                struct safexcel_ring cdr;
                struct safexcel_ring rdr;
+
+               /* queue */
+               struct crypto_queue queue;
+               spinlock_t queue_lock;
+               bool need_dequeue;
        } ring[EIP197_MAX_RINGS];
 };
 
@@ -533,7 +533,7 @@ struct safexcel_inv_result {
        int error;
 };
 
-void safexcel_dequeue(struct safexcel_crypto_priv *priv);
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_free_context(struct safexcel_crypto_priv *priv,
                                  struct crypto_async_request *req,
index 6037cdfc1f169e71fd2ad89979a2071e7903bc33..d2207ac5ba19cdd357ab5bbae3a128d1fa92183e 100644 (file)
@@ -339,18 +339,21 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                return ndesc;
        }
 
+       ring = safexcel_select_ring(priv);
+       ctx->base.ring = ring;
        ctx->base.needs_inv = false;
-       ctx->base.ring = safexcel_select_ring(priv);
        ctx->base.send = safexcel_aes_send;
 
-       spin_lock_bh(&priv->lock);
-       enq_ret = crypto_enqueue_request(&priv->queue, async);
-       spin_unlock_bh(&priv->lock);
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;
 
-       priv->need_dequeue = true;
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
+
        *should_complete = false;
 
        return ndesc;
@@ -384,6 +387,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct skcipher_request req;
        struct safexcel_inv_result result = { 0 };
+       int ring = ctx->base.ring;
 
        memset(&req, 0, sizeof(struct skcipher_request));
 
@@ -397,12 +401,12 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_cipher_send_inv;
 
-       spin_lock_bh(&priv->lock);
-       crypto_enqueue_request(&priv->queue, &req.base);
-       spin_unlock_bh(&priv->lock);
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->need_dequeue)
-               safexcel_dequeue(priv);
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
 
        wait_for_completion_interruptible(&result.completion);
 
@@ -421,7 +425,7 @@ static int safexcel_aes(struct skcipher_request *req,
 {
        struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
-       int ret;
+       int ret, ring;
 
        ctx->direction = dir;
        ctx->mode = mode;
@@ -440,12 +444,14 @@ static int safexcel_aes(struct skcipher_request *req,
                        return -ENOMEM;
        }
 
-       spin_lock_bh(&priv->lock);
-       ret = crypto_enqueue_request(&priv->queue, &req->base);
-       spin_unlock_bh(&priv->lock);
+       ring = ctx->base.ring;
+
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->need_dequeue)
-               safexcel_dequeue(priv);
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
 
        return ret;
 }
index 4e526372464fde454c30cef7b6ad6ab9515a7d31..8527a5899a2f7b6a3245a4a52ca4c0283b2f4666 100644 (file)
@@ -374,18 +374,21 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                return 1;
        }
 
-       ctx->base.ring = safexcel_select_ring(priv);
+       ring = safexcel_select_ring(priv);
+       ctx->base.ring = ring;
        ctx->base.needs_inv = false;
        ctx->base.send = safexcel_ahash_send;
 
-       spin_lock_bh(&priv->lock);
-       enq_ret = crypto_enqueue_request(&priv->queue, async);
-       spin_unlock_bh(&priv->lock);
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;
 
-       priv->need_dequeue = true;
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
+
        *should_complete = false;
 
        return 1;
@@ -417,6 +420,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct ahash_request req;
        struct safexcel_inv_result result = { 0 };
+       int ring = ctx->base.ring;
 
        memset(&req, 0, sizeof(struct ahash_request));
 
@@ -430,12 +434,12 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
        ctx->base.exit_inv = true;
        ctx->base.send = safexcel_ahash_send_inv;
 
-       spin_lock_bh(&priv->lock);
-       crypto_enqueue_request(&priv->queue, &req.base);
-       spin_unlock_bh(&priv->lock);
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->need_dequeue)
-               safexcel_dequeue(priv);
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
 
        wait_for_completion_interruptible(&result.completion);
 
@@ -477,7 +481,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
-       int ret;
+       int ret, ring;
 
        ctx->base.send = safexcel_ahash_send;
 
@@ -496,12 +500,14 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
                        return -ENOMEM;
        }
 
-       spin_lock_bh(&priv->lock);
-       ret = crypto_enqueue_request(&priv->queue, &areq->base);
-       spin_unlock_bh(&priv->lock);
+       ring = ctx->base.ring;
+
+       spin_lock_bh(&priv->ring[ring].queue_lock);
+       ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+       spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->need_dequeue)
-               safexcel_dequeue(priv);
+       if (!priv->ring[ring].need_dequeue)
+               safexcel_dequeue(priv, ring);
 
        return ret;
 }