struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int err = 0;
- ssi_buffer_mgr_unmap_aead_request(dev, areq);
+ cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
areq->iv = areq_ctx->backup_iv;
/* In case of payload authentication failure, MUST NOT
 * reveal the decrypted message --> zero its memory.
*/
- ssi_buffer_mgr_zero_sgl(areq->dst, areq_ctx->cryptlen);
+ cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
err = -EBADMSG;
}
} else { /*ENCRYPT*/
- if (unlikely(areq_ctx->is_icv_fragmented))
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
- areq->cryptlen + areq_ctx->dst_offset,
- (areq->cryptlen + areq_ctx->dst_offset +
- ctx->authsize),
- SSI_SG_FROM_BUF);
+ if (unlikely(areq_ctx->is_icv_fragmented)) {
+ cc_copy_sg_portion(dev, areq_ctx->mac_buf,
+ areq_ctx->dst_sgl,
+ (areq->cryptlen +
+ areq_ctx->dst_offset),
+ (areq->cryptlen +
+ areq_ctx->dst_offset +
+ ctx->authsize),
+ SSI_SG_FROM_BUF);
+ }
/* If an IV was generated, copy it back to the user-provided buffer. */
if (areq_ctx->backup_giv) {
}
#endif /*SSI_CC_HAS_AES_GCM*/
- rc = ssi_buffer_mgr_map_aead_request(ctx->drvdata, req);
+ rc = cc_map_aead_request(ctx->drvdata, req);
if (unlikely(rc != 0)) {
dev_err(dev, "map_request() failed\n");
goto exit;
#endif
default:
dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
rc = -ENOTSUPP;
goto exit;
}
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
}
exit:
INIT_LIST_HEAD(&aead_handle->aead_list);
drvdata->aead_handle = aead_handle;
- aead_handle->sram_workspace_addr = ssi_sram_mgr_alloc(
- drvdata, MAX_HMAC_DIGEST_SIZE);
+ aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
+ MAX_HMAC_DIGEST_SIZE);
+
if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
};
/**
- * ssi_buffer_mgr_get_sgl_nents() - Get scatterlist number of entries.
+ * cc_get_sgl_nents() - Get scatterlist number of entries.
*
* @sg_list: SG list
* @nbytes: [IN] Total SGL data bytes.
* @lbytes: [OUT] Returns the number of bytes in the last entry
*/
-static unsigned int ssi_buffer_mgr_get_sgl_nents(
+static unsigned int cc_get_sgl_nents(
struct device *dev, struct scatterlist *sg_list,
unsigned int nbytes, u32 *lbytes, bool *is_chained)
{
}
/**
- * ssi_buffer_mgr_zero_sgl() - Zero scatter scatter list data.
+ * cc_zero_sgl() - Zero scatterlist data.
*
* @sgl: The scatterlist to zero
* @data_len: The number of bytes to zero
*/
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len)
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
struct scatterlist *current_sg = sgl;
int sg_index = 0;
}
/**
- * ssi_buffer_mgr_copy_scatterlist_portion() - Copy scatter list data,
+ * cc_copy_sg_portion() - Copy scatter list data,
* from to_skip to end, to dest and vice versa
*
* @dest: Buffer to copy to/from
* @sg: The SG list to copy from/to
* @to_skip: Offset in the SG data where the copy starts
* @end: Offset in the SG data where the copy stops
* @direct: Copy direction (SSI_SG_TO_BUF / SSI_SG_FROM_BUF)
*/
-void ssi_buffer_mgr_copy_scatterlist_portion(
+void cc_copy_sg_portion(
struct device *dev, u8 *dest,
struct scatterlist *sg, u32 to_skip,
u32 end, enum ssi_sg_cpy_direct direct)
{
u32 nents, lbytes;
- nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == SSI_SG_TO_BUF));
}
-static inline int ssi_buffer_mgr_render_buff_to_mlli(
+static inline int cc_render_buff_to_mlli(
struct device *dev, dma_addr_t buff_dma, u32 buff_size,
u32 *curr_nents, u32 **mlli_entry_pp)
{
return 0;
}
-static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
+static inline int cc_render_sg_to_mlli(
struct device *dev, struct scatterlist *sgl,
u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
u32 **mlli_entry_pp)
sg_dma_len(curr_sgl) - sgl_offset :
sgl_data_len;
sgl_data_len -= entry_data_len;
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_dma_address(curr_sgl) + sgl_offset,
- entry_data_len, curr_nents, &mlli_entry_p);
+ rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
+ sgl_offset, entry_data_len,
+ curr_nents, &mlli_entry_p);
if (rc != 0)
return rc;
return 0;
}
-static int ssi_buffer_mgr_generate_mlli(
+static int cc_generate_mlli(
struct device *dev,
struct buffer_array *sg_data,
struct mlli_params *mlli_params)
/* go over all SG's and link it to one MLLI table */
for (i = 0; i < sg_data->num_of_buffers; i++) {
if (sg_data->type[i] == DMA_SGL_TYPE)
- rc = ssi_buffer_mgr_render_scatterlist_to_mlli(
- dev, sg_data->entry[i].sgl,
- sg_data->total_data_len[i], sg_data->offset[i],
- &total_nents, &mlli_p);
+ rc = cc_render_sg_to_mlli(dev, sg_data->entry[i].sgl,
+ sg_data->total_data_len[i],
+ sg_data->offset[i],
+ &total_nents, &mlli_p);
else /*DMA_BUFF_TYPE*/
- rc = ssi_buffer_mgr_render_buff_to_mlli(
- dev, sg_data->entry[i].buffer_dma,
- sg_data->total_data_len[i], &total_nents,
- &mlli_p);
+ rc = cc_render_buff_to_mlli(dev,
+ sg_data->entry[i].buffer_dma,
+ sg_data->total_data_len[i],
+ &total_nents, &mlli_p);
if (rc != 0)
return rc;
return rc;
}
-static inline void ssi_buffer_mgr_add_buffer_entry(
+static inline void cc_add_buffer_entry(
struct device *dev, struct buffer_array *sgl_data,
dma_addr_t buffer_dma, unsigned int buffer_len,
bool is_last_entry, u32 *mlli_nents)
sgl_data->num_of_buffers++;
}
-static inline void ssi_buffer_mgr_add_scatterlist_entry(
+static inline void cc_add_sg_entry(
struct device *dev,
struct buffer_array *sgl_data,
unsigned int nents,
}
static int
-ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
+cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
+ enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
return 0;
}
-static int ssi_buffer_mgr_map_scatterlist(
+static int cc_map_sg(
struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction,
u32 *nents, u32 max_sg_nents,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = ssi_buffer_mgr_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
+ &is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
/*In this case the driver maps entry by entry so it
* must have the same nents before and after map
*/
- *mapped_nents = ssi_buffer_mgr_dma_map_sg(dev,
- sg,
- *nents,
- direction);
+ *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
+ direction);
if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
/* prepare for case of MLLI */
if (assoclen > 0) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1,
- &areq_ctx->ccm_adata_sg,
- (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
- 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
+ (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
+ 0, false, NULL);
}
return 0;
}
areq_ctx->curr_sg = areq_ctx->buff_sg;
areq_ctx->in_nents = 0;
/* prepare for case of MLLI */
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data, 1, areq_ctx->buff_sg,
- curr_buff_cnt, 0, false, NULL);
+ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+ false, NULL);
return 0;
}
-void ssi_buffer_mgr_unmap_blkcipher_request(
+void cc_unmap_blkcipher_request(
struct device *dev,
void *ctx,
unsigned int ivsize,
}
}
-int ssi_buffer_mgr_map_blkcipher_request(
+int cc_map_blkcipher_request(
struct ssi_drvdata *drvdata,
void *ctx,
unsigned int ivsize,
}
/* Map the src SGL */
- rc = ssi_buffer_mgr_map_scatterlist(dev, src,
- nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents);
+ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto ablkcipher_exit;
/* Handle inplace operation */
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
req_ctx->out_nents = 0;
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
}
} else {
/* Map the dst sg */
- if (unlikely(ssi_buffer_mgr_map_scatterlist(
- dev, dst, nbytes,
- DMA_BIDIRECTIONAL, &req_ctx->out_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents))){
+ if (unlikely(cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents))) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
if (unlikely((req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI))) {
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->in_nents,
- src, nbytes, 0,
- true,
- &req_ctx->in_mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- req_ctx->out_nents,
- dst, nbytes, 0,
- true,
- &req_ctx->out_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+ nbytes, 0, true,
+ &req_ctx->in_mlli_nents);
+ cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+ nbytes, 0, true,
+ &req_ctx->out_mlli_nents);
}
}
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params);
if (unlikely(rc != 0))
goto ablkcipher_exit;
}
return 0;
ablkcipher_exit:
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
return rc;
}
-void ssi_buffer_mgr_unmap_aead_request(
- struct device *dev, struct aead_request *req)
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
size_to_unmap += crypto_aead_ivsize(tfm);
dma_unmap_sg(dev, req->src,
- ssi_buffer_mgr_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->src, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
- ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_to_unmap,
- &dummy, &chained),
+ cc_get_sgl_nents(dev, req->dst, size_to_unmap,
+ &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
/* copy mac to a temporary location to deal with possible
 * data memory overriding caused by cache coherence problems.
*/
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (size_to_skip + req->cryptlen -
+ areq_ctx->req_authsize),
+ (size_to_skip + req->cryptlen),
+ SSI_SG_FROM_BUF);
}
}
-static inline int ssi_buffer_mgr_get_aead_icv_nents(
+static inline int cc_get_aead_icv_nents(
struct device *dev,
struct scatterlist *sgl,
unsigned int sgl_nents,
return nents;
}
-static inline int ssi_buffer_mgr_aead_chain_iv(
+static inline int cc_aead_chain_iv(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
/* Chain to given list */
- ssi_buffer_mgr_add_buffer_entry(
- dev, sg_data,
- areq_ctx->gen_ctx.iv_dma_addr + iv_ofs,
- iv_size_to_authenc, is_last,
- &areq_ctx->assoc.mlli_nents);
+ cc_add_buffer_entry(dev, sg_data,
+ (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
+ iv_size_to_authenc, is_last,
+ &areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
}
return rc;
}
-static inline int ssi_buffer_mgr_aead_chain_assoc(
+static inline int cc_aead_chain_assoc(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
- ssi_buffer_mgr_add_scatterlist_entry(
- dev, sg_data, areq_ctx->assoc.nents,
- req->src, req->assoclen, 0, is_last,
- &areq_ctx->assoc.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
+ req->assoclen, 0, is_last,
+ &areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
}
return rc;
}
-static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
+static inline void cc_prepare_aead_data_dlli(
struct aead_request *req,
u32 *src_last_bytes, u32 *dst_last_bytes)
{
}
}
-static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
+static inline int cc_prepare_aead_data_mlli(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
if (likely(req->src == req->dst)) {
/*INPLACE*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac,
- req->src,
- (skip + req->cryptlen -
- areq_ctx->req_authsize),
- skip + req->cryptlen,
- SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev,
+ areq_ctx->backup_mac,
+ req->src,
+ (skip + req->cryptlen - areq_ctx->req_authsize),
+ (skip + req->cryptlen),
+ SSI_SG_TO_BUF);
}
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else {
} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
/*NON-INPLACE and DECRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize,
- *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
+ areq_ctx->src.nents,
+ authsize, *src_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
if (areq_ctx->is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac,
+ req->src,
+ (size_to_skip + req->cryptlen - areq_ctx->req_authsize),
+ (size_to_skip + req->cryptlen),
+ SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else { /* Contig. ICV */
/* Should handle the case where the sg is not contig. */
} else {
/*NON-INPLACE and ENCRYPT*/
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->dst.nents,
- areq_ctx->dst_sgl,
- areq_ctx->cryptlen,
- areq_ctx->dst_offset,
- is_last_table,
- &areq_ctx->dst.mlli_nents);
- ssi_buffer_mgr_add_scatterlist_entry(dev, sg_data,
- areq_ctx->src.nents,
- areq_ctx->src_sgl,
- areq_ctx->cryptlen,
- areq_ctx->src_offset,
- is_last_table,
- &areq_ctx->src.mlli_nents);
-
- icv_nents = ssi_buffer_mgr_get_aead_icv_nents(dev,
- areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize,
- *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
+ areq_ctx->dst_sgl, areq_ctx->cryptlen,
+ areq_ctx->dst_offset, is_last_table,
+ &areq_ctx->dst.mlli_nents);
+ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
+ areq_ctx->src_sgl, areq_ctx->cryptlen,
+ areq_ctx->src_offset, is_last_table,
+ &areq_ctx->src.mlli_nents);
+
+ icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
+ areq_ctx->dst.nents,
+ authsize, *dst_last_bytes,
+ &areq_ctx->is_icv_fragmented);
if (unlikely(icv_nents < 0)) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
return rc;
}
-static inline int ssi_buffer_mgr_aead_chain_data(
+static inline int cc_aead_chain_data(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
size_for_map += crypto_aead_ivsize(tfm);
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->src,
- size_for_map,
- &src_last_bytes,
- &chained);
+ src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
+ &src_last_bytes, &chained);
sg_index = areq_ctx->src_sgl->length;
/* check where the data starts */
while (sg_index <= size_to_skip) {
if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
- DMA_BIDIRECTIONAL,
- &areq_ctx->dst.nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dst_last_bytes,
- &dst_mapped_nents);
+ rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->dst.nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
+ &dst_mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto chain_data_exit;
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(dev, req->dst,
- size_for_map,
- &dst_last_bytes,
- &chained);
+ dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
+ &dst_last_bytes, &chained);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
(dst_mapped_nents > 1) ||
do_chain) {
areq_ctx->data_buff_type = SSI_DMA_BUF_MLLI;
- rc = ssi_buffer_mgr_prepare_aead_data_mlli(drvdata, req,
- sg_data,
- &src_last_bytes,
- &dst_last_bytes,
- is_last_table);
+ rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes,
+ &dst_last_bytes, is_last_table);
} else {
areq_ctx->data_buff_type = SSI_DMA_BUF_DLLI;
- ssi_buffer_mgr_prepare_aead_data_dlli(
- req, &src_last_bytes, &dst_last_bytes);
+ cc_prepare_aead_data_dlli(req, &src_last_bytes,
+ &dst_last_bytes);
}
chain_data_exit:
return rc;
}
-static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
- struct aead_request *req)
+static void cc_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
+ struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
u32 curr_mlli_size = 0;
}
}
-int ssi_buffer_mgr_map_aead_request(
+int cc_map_aead_request(
struct ssi_drvdata *drvdata, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
/* copy mac to a temporary location to deal with possible
 * data memory overriding caused by cache coherence problems.
*/
- ssi_buffer_mgr_copy_scatterlist_portion(
- dev, areq_ctx->backup_mac, req->src,
- size_to_skip + req->cryptlen - areq_ctx->req_authsize,
- size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
+ (size_to_skip + req->cryptlen - areq_ctx->req_authsize),
+ (size_to_skip + req->cryptlen),
+ SSI_SG_TO_BUF);
}
/* calculate the size for cipher; remove ICV in decrypt */
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
- rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
- size_to_map, DMA_BIDIRECTIONAL, &areq_ctx->src.nents,
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+ &areq_ctx->src.nents,
+ (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
+ LLI_MAX_NUM_OF_DATA_ENTRIES),
+ &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto aead_map_failure;
* (2) Src/Dst SGLs
* Note: IV is a contig. buffer (not an SGL)
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, true, false);
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
if (unlikely(rc != 0))
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, true, false);
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
if (unlikely(rc != 0))
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, false);
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
if (unlikely(rc != 0))
goto aead_map_failure;
} else { /* DOUBLE-PASS flow */
* (3) MLLI for src
* (4) MLLI for dst
*/
- rc = ssi_buffer_mgr_aead_chain_assoc(drvdata, req, &sg_data, false, true);
+ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
if (unlikely(rc != 0))
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_iv(drvdata, req, &sg_data, false, true);
+ rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
if (unlikely(rc != 0))
goto aead_map_failure;
- rc = ssi_buffer_mgr_aead_chain_data(drvdata, req, &sg_data, true, true);
+ rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
if (unlikely(rc != 0))
goto aead_map_failure;
}
(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
- rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
+ rc = cc_generate_mlli(dev, &sg_data, mlli_params);
if (unlikely(rc != 0))
goto aead_map_failure;
- ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
+ cc_update_aead_mlli_nents(drvdata, req);
dev_dbg(dev, "assoc params mn %d\n",
areq_ctx->assoc.mlli_nents);
dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
return 0;
aead_map_failure:
- ssi_buffer_mgr_unmap_aead_request(dev, req);
+ cc_unmap_aead_request(dev, req);
return rc;
}
-int ssi_buffer_mgr_map_hash_request_final(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update)
+int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
}
if (src && (nbytes > 0) && do_update) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src, nbytes,
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (unlikely(cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents))) {
goto unmap_curr_buff;
}
if (src && (mapped_nents == 1)
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src, nbytes, 0, true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+ 0, true, &areq_ctx->mlli_nents);
+ if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params) != 0)) {
goto fail_unmap_din;
}
}
return -ENOMEM;
}
-int ssi_buffer_mgr_map_hash_request_update(
- struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size)
+int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
struct device *dev = drvdata_to_dev(drvdata);
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
- ssi_buffer_mgr_get_sgl_nents(dev, src, nbytes, &dummy,
- NULL);
+ cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
next_buff, (update_data_len - *curr_buff_cnt),
*next_buff_cnt);
- ssi_buffer_mgr_copy_scatterlist_portion(dev, next_buff, src,
- (update_data_len - *curr_buff_cnt),
- nbytes, SSI_SG_TO_BUF);
+ cc_copy_sg_portion(dev, next_buff, src,
+ (update_data_len - *curr_buff_cnt),
+ nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
}
if (update_data_len > *curr_buff_cnt) {
- if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy,
- &mapped_nents))){
+ if (unlikely(cc_map_sg(dev, src,
+ (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents))) {
goto unmap_curr_buff;
}
if ((mapped_nents == 1)
if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
- ssi_buffer_mgr_add_scatterlist_entry(dev, &sg_data,
- areq_ctx->in_nents,
- src,
- (update_data_len - *curr_buff_cnt),
- 0,
- true,
- &areq_ctx->mlli_nents);
- if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
- mlli_params) != 0)) {
+ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+ (update_data_len - *curr_buff_cnt), 0, true,
+ &areq_ctx->mlli_nents);
+ if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params) != 0)) {
goto fail_unmap_din;
}
}
return -ENOMEM;
}
-void ssi_buffer_mgr_unmap_hash_request(
- struct device *dev, void *ctx, struct scatterlist *src, bool do_revert)
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert)
{
struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
u32 *prev_len = areq_ctx->buff_index ? &areq_ctx->buff0_cnt :
}
}
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
return 0;
error:
- ssi_buffer_mgr_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
return -ENOMEM;
}
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
u32 mlli_len;
};
-int ssi_buffer_mgr_init(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_init(struct ssi_drvdata *drvdata);
-int ssi_buffer_mgr_fini(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata);
-int ssi_buffer_mgr_map_blkcipher_request(
- struct ssi_drvdata *drvdata,
- void *ctx,
- unsigned int ivsize,
- unsigned int nbytes,
- void *info,
- struct scatterlist *src,
- struct scatterlist *dst);
+int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
+ unsigned int ivsize, unsigned int nbytes,
+ void *info, struct scatterlist *src,
+ struct scatterlist *dst);
-void ssi_buffer_mgr_unmap_blkcipher_request(
- struct device *dev,
- void *ctx,
- unsigned int ivsize,
- struct scatterlist *src,
- struct scatterlist *dst);
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+ unsigned int ivsize,
+ struct scatterlist *src,
+ struct scatterlist *dst);
-int ssi_buffer_mgr_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
+int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
-void ssi_buffer_mgr_unmap_aead_request(struct device *dev, struct aead_request *req);
+void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
-int ssi_buffer_mgr_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update);
+int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ bool do_update);
-int ssi_buffer_mgr_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size);
+int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
+ struct scatterlist *src, unsigned int nbytes,
+ unsigned int block_size);
-void ssi_buffer_mgr_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert);
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+ struct scatterlist *src, bool do_revert);
-void ssi_buffer_mgr_copy_scatterlist_portion(struct device *dev, u8 *dest,
- struct scatterlist *sg,
- u32 to_skip, u32 end,
- enum ssi_sg_cpy_direct direct);
+void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
+ u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
-void ssi_buffer_mgr_zero_sgl(struct scatterlist *sgl, u32 data_len);
+void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
#endif /*__BUFFER_MGR_H__*/
int completion_error = 0;
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
kfree(req_ctx->iv);
if (areq) {
/* STAT_PHASE_1: Map buffers */
- rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx,
- ivsize, nbytes, req_ctx->iv,
- src, dst);
+ rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+ req_ctx->iv, src, dst);
if (unlikely(rc != 0)) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
if (areq) {
if (unlikely(rc != -EINPROGRESS)) {
/* Failed to send the request or request completed synchronously */
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src,
+ dst);
}
} else {
if (rc != 0) {
- ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+ cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src,
+ dst);
} else {
rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
src, ivsize, NULL,
}
new_drvdata->mlli_sram_addr =
- ssi_sram_mgr_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
+ cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
- rc = ssi_buffer_mgr_init(new_drvdata);
+ rc = cc_buffer_mgr_init(new_drvdata);
if (unlikely(rc != 0)) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
- rc = ssi_power_mgr_init(new_drvdata);
+ rc = cc_pm_init(new_drvdata);
if (unlikely(rc != 0)) {
- dev_err(dev, "ssi_power_mgr_init failed\n");
+ dev_err(dev, "cc_pm_init failed\n");
goto post_buf_mgr_err;
}
post_ivgen_err:
ssi_ivgen_fini(new_drvdata);
post_power_mgr_err:
- ssi_power_mgr_fini(new_drvdata);
+ cc_pm_fini(new_drvdata);
post_buf_mgr_err:
- ssi_buffer_mgr_fini(new_drvdata);
+ cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
request_mgr_fini(new_drvdata);
post_sram_mgr_err:
ssi_hash_free(drvdata);
ssi_ablkcipher_free(drvdata);
ssi_ivgen_fini(drvdata);
- ssi_power_mgr_fini(drvdata);
- ssi_buffer_mgr_fini(drvdata);
+ cc_pm_fini(drvdata);
+ cc_buffer_mgr_fini(drvdata);
request_mgr_fini(drvdata);
ssi_sram_mgr_fini(drvdata);
ssi_fips_fini(drvdata);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
static const struct dev_pm_ops arm_cc7x_driver_pm = {
- SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL)
};
#endif
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_hash_request(dev, state, req->src, false);
req->base.complete(&req->base, 0);
}
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
ssi_hash_unmap_request(dev, state, ctx);
req->base.complete(&req->base, 0);
dev_dbg(dev, "req=%pK\n", req);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_hash_request(dev, state, req->src, false);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
ssi_hash_unmap_request(dev, state, ctx);
req->base.complete(&req->base, 0);
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state,
+ src, nbytes, 1) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
} else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ cc_unmap_hash_request(dev, state, src, false);
}
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
return 0;
}
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size);
+ rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
+ block_size);
if (unlikely(rc)) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
} else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ cc_unmap_hash_request(dev, state, src, false);
}
}
return rc;
dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
+ nbytes, 1) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ cc_unmap_hash_request(dev, state, src, false);
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
}
dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
+ nbytes, 0) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
}
} else {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
if (rc != 0) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, true);
+ cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
} else {
- ssi_buffer_mgr_unmap_hash_request(dev, state, src, false);
+ cc_unmap_hash_request(dev, state, src, false);
ssi_hash_unmap_result(dev, state, digestsize, result);
ssi_hash_unmap_request(dev, state, ctx);
}
state->xcbc_count++;
- rc = ssi_buffer_mgr_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size);
+ rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
+ req->nbytes, block_size);
if (unlikely(rc)) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
}
return rc;
}
dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
return rc;
return ssi_mac_final(req);
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
}
return rc;
return -ENOMEM;
}
- if (unlikely(ssi_buffer_mgr_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1) != 0)) {
+ if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1) != 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
if (unlikely(rc != -EINPROGRESS)) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- ssi_buffer_mgr_unmap_hash_request(dev, state, req->src, true);
+ cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
ssi_hash_unmap_request(dev, state, ctx);
}
#endif
/* Copy-to-sram digest-len */
- ssi_sram_mgr_const2sram_desc(digest_len_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
#if (DX_DEV_SHA_MAX > 256)
/* Copy-to-sram digest-len for sha384/512 */
- ssi_sram_mgr_const2sram_desc(digest_len_sha512_init, sram_buff_ofs,
- ARRAY_SIZE(digest_len_sha512_init),
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
+ ARRAY_SIZE(digest_len_sha512_init),
+ larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
hash_handle->larval_digest_sram_addr = sram_buff_ofs;
/* Copy-to-sram initial SHA* digests */
- ssi_sram_mgr_const2sram_desc(md5_init, sram_buff_ofs,
- ARRAY_SIZE(md5_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(md5_init, sram_buff_ofs,
+ ARRAY_SIZE(md5_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
sram_buff_ofs += sizeof(md5_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha1_init, sram_buff_ofs,
- ARRAY_SIZE(sha1_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha1_init, sram_buff_ofs,
+ ARRAY_SIZE(sha1_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha1_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha224_init, sram_buff_ofs,
- ARRAY_SIZE(sha224_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha224_init, sram_buff_ofs,
+ ARRAY_SIZE(sha224_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha224_init);
larval_seq_len = 0;
- ssi_sram_mgr_const2sram_desc(sha256_init, sram_buff_ofs,
- ARRAY_SIZE(sha256_init), larval_seq,
- &larval_seq_len);
+ cc_set_sram_desc(sha256_init, sram_buff_ofs,
+ ARRAY_SIZE(sha256_init), larval_seq,
+ &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
if (unlikely(rc != 0))
goto init_digest_const_err;
const u32 const0 = ((u32 *)((u64 *)&sha384_init[i]))[1];
const u32 const1 = ((u32 *)((u64 *)&sha384_init[i]))[0];
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
+ &larval_seq_len);
sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
+ &larval_seq_len);
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
const u32 const0 = ((u32 *)((u64 *)&sha512_init[i]))[1];
const u32 const1 = ((u32 *)((u64 *)&sha512_init[i]))[0];
- ssi_sram_mgr_const2sram_desc(&const0, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(&const0, sram_buff_ofs, 1, larval_seq,
+ &larval_seq_len);
sram_buff_ofs += sizeof(u32);
- ssi_sram_mgr_const2sram_desc(&const1, sram_buff_ofs, 1,
- larval_seq, &larval_seq_len);
+ cc_set_sram_desc(&const1, sram_buff_ofs, 1, larval_seq,
+ &larval_seq_len);
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
sizeof(sha224_init) +
sizeof(sha256_init);
- sram_buff = ssi_sram_mgr_alloc(drvdata, sram_size_to_alloc);
+ sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
if (sram_buff == NULL_SRAM_ADDR) {
dev_err(dev, "SRAM pool exhausted\n");
rc = -ENOMEM;
goto out;
}
/* Allocate IV pool in SRAM */
- ivgen_ctx->pool = ssi_sram_mgr_alloc(drvdata, SSI_IVPOOL_SIZE);
+ ivgen_ctx->pool = cc_sram_alloc(drvdata, SSI_IVPOOL_SIZE);
if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
dev_err(device, "SRAM pool exhausted\n");
rc = -ENOMEM;
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
-int ssi_power_mgr_runtime_suspend(struct device *dev)
+int cc_pm_suspend(struct device *dev)
{
struct ssi_drvdata *drvdata =
(struct ssi_drvdata *)dev_get_drvdata(dev);
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
- rc = ssi_request_mgr_runtime_suspend_queue(drvdata);
+ rc = cc_suspend_req_queue(drvdata);
if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_suspend_queue (%x)\n",
+ dev_err(dev, "cc_suspend_req_queue (%x)\n",
rc);
return rc;
}
return 0;
}
-int ssi_power_mgr_runtime_resume(struct device *dev)
+int cc_pm_resume(struct device *dev)
{
int rc;
struct ssi_drvdata *drvdata =
return rc;
}
- rc = ssi_request_mgr_runtime_resume_queue(drvdata);
+ rc = cc_resume_req_queue(drvdata);
if (rc != 0) {
- dev_err(dev, "ssi_request_mgr_runtime_resume_queue (%x)\n", rc);
+ dev_err(dev, "cc_resume_req_queue (%x)\n", rc);
return rc;
}
return 0;
}
-int ssi_power_mgr_runtime_get(struct device *dev)
+int cc_pm_get(struct device *dev)
{
int rc = 0;
- if (ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+ if (cc_req_queue_suspended((struct ssi_drvdata *)dev_get_drvdata(dev))) {
rc = pm_runtime_get_sync(dev);
} else {
pm_runtime_get_noresume(dev);
return rc;
}
-int ssi_power_mgr_runtime_put_suspend(struct device *dev)
+int cc_pm_put_suspend(struct device *dev)
{
int rc = 0;
- if (!ssi_request_mgr_is_queue_runtime_suspend(
- (struct ssi_drvdata *)dev_get_drvdata(dev))) {
+ if (!cc_req_queue_suspended((struct ssi_drvdata *)dev_get_drvdata(dev))) {
pm_runtime_mark_last_busy(dev);
rc = pm_runtime_put_autosuspend(dev);
} else {
#endif
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
+int cc_pm_init(struct ssi_drvdata *drvdata)
{
int rc = 0;
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
return rc;
}
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata)
+void cc_pm_fini(struct ssi_drvdata *drvdata)
{
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
pm_runtime_disable(drvdata_to_dev(drvdata));
#define SSI_SUSPEND_TIMEOUT 3000
-int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
+int cc_pm_init(struct ssi_drvdata *drvdata);
-void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
+void cc_pm_fini(struct ssi_drvdata *drvdata);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_power_mgr_runtime_suspend(struct device *dev);
+int cc_pm_suspend(struct device *dev);
-int ssi_power_mgr_runtime_resume(struct device *dev);
+int cc_pm_resume(struct device *dev);
-int ssi_power_mgr_runtime_get(struct device *dev);
+int cc_pm_get(struct device *dev);
-int ssi_power_mgr_runtime_put_suspend(struct device *dev);
+int cc_pm_put_suspend(struct device *dev);
#endif
#endif /*__POWER_MGR_H__*/
(!is_dout ? 1 : 0));
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_get(dev);
+ rc = cc_pm_get(dev);
if (rc != 0) {
- dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+ dev_err(dev, "cc_pm_get returned %x\n", rc);
return rc;
}
#endif
* (SW queue is full)
*/
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
+ cc_pm_put_suspend(dev);
#endif
return rc;
}
dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- ssi_power_mgr_runtime_put_suspend(dev);
+ cc_pm_put_suspend(dev);
#endif
return rc;
}
dev_dbg(dev, "Request completed. axi_completed=%d\n",
request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
- rc = ssi_power_mgr_runtime_put_suspend(dev);
+ rc = cc_pm_put_suspend(dev);
if (rc != 0)
dev_err(dev, "Failed to set runtime suspension %d\n",
rc);
* the spin lock protection
*/
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata)
+int cc_resume_req_queue(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle;
* suspend the queue configuration. Since it is used for the runtime suspend
* only, verify that the queue can be suspended.
*/
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata)
+int cc_suspend_req_queue(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
return 0;
}
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata)
+bool cc_req_queue_suspended(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
void request_mgr_fini(struct ssi_drvdata *drvdata);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
-int ssi_request_mgr_runtime_resume_queue(struct ssi_drvdata *drvdata);
+int cc_resume_req_queue(struct ssi_drvdata *drvdata);
-int ssi_request_mgr_runtime_suspend_queue(struct ssi_drvdata *drvdata);
+int cc_suspend_req_queue(struct ssi_drvdata *drvdata);
-bool ssi_request_mgr_is_queue_runtime_suspend(struct ssi_drvdata *drvdata);
+bool cc_req_queue_suspended(struct ssi_drvdata *drvdata);
#endif
#endif /*__REQUEST_MGR_H__*/
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size)
+ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
{
struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
}
/**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create a sequence of const descriptors that
 * sets the values of the given array into SRAM.
* Note: each const value can't exceed word size.
*
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void ssi_sram_mgr_const2sram_desc(
- const u32 *src, ssi_sram_addr_t dst,
- unsigned int nelement,
- struct cc_hw_desc *seq, unsigned int *seq_len)
+void cc_set_sram_desc(const u32 *src, ssi_sram_addr_t dst,
+ unsigned int nelement, struct cc_hw_desc *seq,
+ unsigned int *seq_len)
{
u32 i;
unsigned int idx = *seq_len;
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t ssi_sram_mgr_alloc(struct ssi_drvdata *drvdata, u32 size);
+ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size);
/**
- * ssi_sram_mgr_const2sram_desc() - Create const descriptors sequence to
+ * cc_set_sram_desc() - Create a sequence of const descriptors that
 * sets the values of the given array into SRAM.
* Note: each const value can't exceed word size.
*
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void ssi_sram_mgr_const2sram_desc(
+void cc_set_sram_desc(
const u32 *src, ssi_sram_addr_t dst,
unsigned int nelement,
struct cc_hw_desc *seq, unsigned int *seq_len);