	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req)
-{
-	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
-	if (ctx->cache) {
-		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
-				 DMA_TO_DEVICE);
-		kfree(ctx->cache);
-		ctx->cache = NULL;
-		ctx->cache_sz = 0;
-	}
-}
-
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;
	int ring;
	bool needs_inv;
	bool exit_inv;
-
-	/* Used for ahash requests */
-	void *cache;
-	dma_addr_t cache_dma;
-	unsigned int cache_sz;
};
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req);
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring,
	u64 processed;
	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};
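The struct change above is the heart of the patch: the cache DMA handle and its size now travel with each request instead of living in the shared transform context. As rough orientation (a sketch, not driver code; the example_* structures are trimmed stand-ins), per-request state is reached through ahash_request_ctx() while per-transform state comes from crypto_ahash_ctx():

#include <linux/types.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

/* Trimmed stand-ins for the driver structures touched by this patch */
struct example_ahash_ctx {	/* one per transform, shared by its requests */
	dma_addr_t ctxr_dma;
};

struct example_ahash_req {	/* one per in-flight request */
	dma_addr_t cache_dma;
	unsigned int cache_sz;
};

/* Request-private area, sized via crypto_ahash_set_reqsize() */
static struct example_ahash_req *example_req_state(struct ahash_request *areq)
{
	return ahash_request_ctx(areq);
}

/* Transform-wide context, shared by every request issued on the tfm */
static struct example_ahash_ctx *example_tfm_state(struct ahash_request *areq)
{
	return crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
}

Keeping cache_dma and cache_sz in the request-private area means two requests issued on the same tfm no longer share a single cache buffer and mapping.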
		sreq->result_dma = 0;
	}
-	safexcel_free_context(priv, async);
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
	cache_len = sreq->len - sreq->processed;
	if (cache_len)
	/* Add a command descriptor for the cached data, if any */
	if (cache_len) {
-		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
-		if (!ctx->base.cache) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-		memcpy(ctx->base.cache, req->cache, cache_len);
-		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
-						     cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
-			ret = -EINVAL;
-			goto free_cache;
-		}
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
-		ctx->base.cache_sz = cache_len;
+		req->cache_sz = cache_len;
		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
						 (cache_len == len),
-						 ctx->base.cache_dma,
-						 cache_len, len,
+						 req->cache_dma, cache_len, len,
						 ctx->base.ctxr_dma);
		if (IS_ERR(first_cdesc)) {
			ret = PTR_ERR(first_cdesc);
	for (i = 0; i < n_cdesc; i++)
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
-	if (ctx->base.cache_dma) {
-		dma_unmap_single(priv->dev, ctx->base.cache_dma,
-				 ctx->base.cache_sz, DMA_TO_DEVICE);
-		ctx->base.cache_sz = 0;
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
	}
-free_cache:
-	kfree(ctx->base.cache);
-	ctx->base.cache = NULL;
-unlock:
	spin_unlock_bh(&priv->ring[ring].egress_lock);
	return ret;
}
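For reference, the streaming DMA pattern the per-request fields rely on, shown as a minimal self-contained sketch (the example_* helpers and the dev/buf/len parameters are placeholders, not driver code):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a CPU buffer so the engine can read it; record handle and size. */
static int example_map_cache(struct device *dev, void *buf, size_t len,
			     dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -EINVAL;
	return 0;
}

/* Undo the mapping once the request completes or an error path unwinds. */
static void example_unmap_cache(struct device *dev, dma_addr_t handle,
				size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}

Recording the handle and size next to each other in the request lets the completion and error paths unmap unconditionally whenever cache_dma is non-zero, which is what the hunks above do.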