return -ENOMEM;
}
-static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void ssi_aead_complete(struct device *dev, void *ssi_req)
{
struct aead_request *areq = (struct aead_request *)ssi_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
struct crypto_shash *shash_tfm;
};
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
+static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req);
static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
{
struct scatterlist *dst,
struct scatterlist *src,
unsigned int ivsize,
- void *areq,
- void __iomem *cc_base)
+ void *areq)
{
int completion_error = 0;
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
dst);
} else {
rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
- src, ivsize, NULL,
- ctx_p->drvdata->cc_base);
+ src, ivsize, NULL);
}
}
return rc;
}
-static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req)
{
struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
- ivsize, areq, cc_base);
+ ivsize, areq);
}
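Taken together, the hunks above converge on a single two-argument completion prototype; register access, where a handler still needs it, goes through the driver data already reachable from the request context (e.g. ctx_p->drvdata->cc_base) rather than a passed-in cc_base. As a hedged illustration only -- the handler name and body below are made up, not taken from the patch -- a completion handler under the new signature looks like this:

static void example_cipher_complete(struct device *dev, void *ssi_req)
{
	/* ssi_req is the request pointer stored as user_arg at enqueue time */
	struct ablkcipher_request *req = (struct ablkcipher_request *)ssi_req;

	dev_dbg(dev, "completing request %p\n", req);
	req->base.complete(&req->base, 0);
}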
/* Async wrap functions */
static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
- void __iomem *cc_base = NULL;
struct ssi_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
&req_mem_cc_regs->start, new_drvdata->cc_base);
- cc_base = new_drvdata->cc_base;
-
/* Then IRQ */
new_drvdata->irq = platform_get_irq(plat_dev, 0);
if (new_drvdata->irq < 0) {
#define SSI_MAX_IVGEN_DMA_ADDRESSES 3
struct ssi_crypto_req {
- void (*user_cb)(struct device *dev, void *req, void __iomem *cc_base);
+ void (*user_cb)(struct device *dev, void *req);
void *user_arg;
dma_addr_t ivgen_dma_addr[SSI_MAX_IVGEN_DMA_ADDRESSES];
/* For the first 'ivgen_dma_addr_len' addresses of this array,
state->digest_result_dma_addr = 0;
}
-static void ssi_hash_update_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void ssi_hash_update_complete(struct device *dev, void *ssi_req)
{
struct ahash_request *req = (struct ahash_request *)ssi_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
req->base.complete(&req->base, 0);
}
-static void ssi_hash_digest_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void ssi_hash_digest_complete(struct device *dev, void *ssi_req)
{
struct ahash_request *req = (struct ahash_request *)ssi_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
req->base.complete(&req->base, 0);
}
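For context on where req->base.complete(&req->base, 0) lands: it invokes whatever callback the ahash user registered on the request. A hedged sketch of that caller side follows; the function names are illustrative and not from the driver, only the crypto API calls and the crypto_async_request callback signature of this kernel vintage are assumed:

static void example_ahash_done(struct crypto_async_request *async_req, int err)
{
	struct ahash_request *req = ahash_request_cast(async_req);

	/* Reached through req->base.complete(&req->base, 0) in the handlers above. */
	pr_debug("ahash request %p finished with %d\n", req, err);
}

static void example_setup(struct ahash_request *req)
{
	/* Registers example_ahash_done as req->base.complete */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_ahash_done, NULL);
}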
-static void ssi_hash_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
+static void ssi_hash_complete(struct device *dev, void *ssi_req)
{
struct ahash_request *req = (struct ahash_request *)ssi_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
* \param dev
* \param dx_compl_h The completion event to signal
*/
-static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
+static void request_mgr_complete(struct device *dev, void *dx_compl_h)
{
struct completion *this_compl = dx_compl_h;
#endif /* COMPLETION_DELAY */
if (likely(ssi_req->user_cb))
- ssi_req->user_cb(dev, ssi_req->user_arg,
- drvdata->cc_base);
+ ssi_req->user_cb(dev, ssi_req->user_arg);
request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n",
request_mgr_handle->req_queue_tail);
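As a usage sketch of the queue/callback contract above (purely illustrative: only struct ssi_crypto_req, request_mgr_complete and the dequeue call come from the driver, and the enqueue helper named below is hypothetical), a synchronous caller points user_cb at request_mgr_complete and sleeps on the completion that the dequeue path signals:

static int example_send_sync(struct device *dev, struct ssi_drvdata *drvdata)
{
	struct ssi_crypto_req ssi_req = {};
	DECLARE_COMPLETION_ONSTACK(done);
	int rc;

	ssi_req.user_cb = request_mgr_complete; /* signals 'done' when dequeued */
	ssi_req.user_arg = &done;

	rc = example_enqueue(drvdata, &ssi_req); /* hypothetical enqueue helper */
	if (rc)
		return rc;

	/* Dequeue path runs ssi_req->user_cb(dev, ssi_req->user_arg). */
	wait_for_completion(&done);
	return 0;
}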