struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
bool chained;
u32 size_to_unmap = 0;
dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
DMA_BIDIRECTIONAL);
}
-#if DX_HAS_ACP
- if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+ if (drvdata->coherent &&
+ (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
likely(req->src == req->dst))
{
u32 size_to_skip = req->assoclen;
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
}
-#endif
}
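For an in-place decrypt on a coherent platform, the tag saved earlier in areq_ctx->backup_mac is copied back into the scatterlist during unmap. A minimal sketch of the offset arithmetic behind the two copy arguments above (ignoring the extra IV skip applied for GCM4543); the helper name is hypothetical and only illustrates the range, it is not part of the driver:

/* Hypothetical helper: the ICV (MAC) occupies the trailing req_authsize
 * bytes of the assoclen + cryptlen span, which is the range handed to
 * ssi_buffer_mgr_copy_scatterlist_portion() above.
 */
static inline void cc_icv_range(u32 assoclen, u32 cryptlen, u32 authsize,
				u32 *from, u32 *to)
{
	*to = assoclen + cryptlen;	/* end of ciphertext + ICV */
	*from = *to - authsize;		/* start of the ICV */
}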
static inline int ssi_buffer_mgr_get_aead_icv_nents(
* MAC verification upon request completion
*/
if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
-#if !DX_HAS_ACP
- /* In ACP platform we already copying ICV
- * for any INPLACE-DECRYPT operation, hence
+ if (!drvdata->coherent) {
+ /* On coherent platforms (e.g. ACP) the
+ * ICV has already been copied for any
+ * INPLACE-DECRYPT operation, hence
* we must neglect this code.
*/
- u32 size_to_skip = req->assoclen;
- if (areq_ctx->is_gcm4543) {
- size_to_skip += crypto_aead_ivsize(tfm);
+ u32 skip = req->assoclen;
+
+ if (areq_ctx->is_gcm4543)
+ skip += crypto_aead_ivsize(tfm);
+
+ ssi_buffer_mgr_copy_scatterlist_portion(
+ areq_ctx->backup_mac, req->src,
+ (skip + req->cryptlen -
+ areq_ctx->req_authsize),
+ skip + req->cryptlen,
+ SSI_SG_TO_BUF);
}
- ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
-#endif
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else {
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
mlli_params->curr_pool = NULL;
sg_data.num_of_buffers = 0;
-#if DX_HAS_ACP
- if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
+ if (drvdata->coherent &&
+ (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) &&
likely(req->src == req->dst))
{
u32 size_to_skip = req->assoclen;
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
}
-#endif
/* calculate the size for the cipher; remove the ICV in decrypt */
areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
#include <linux/version.h>
-#define DISABLE_COHERENT_DMA_OPS
//#define FLUSH_CACHE_ALL
//#define COMPLETION_DELAY
//#define DX_DUMP_DESCS
//#define DX_IRQ_DELAY 100000
#define DMA_BIT_MASK_LEN 48 /* was 32 bits; enlarged to 48 bits for the Juno platform */
-#if defined (CONFIG_ARM64) // TODO currently only this mode was test on Juno (which is ARM64), need to enable coherent also.
-#define DISABLE_COHERENT_DMA_OPS
-#endif
-
-/* Define the CryptoCell DMA cache coherency signals configuration */
-#if defined (DISABLE_COHERENT_DMA_OPS)
- /* Software Controlled Cache Coherency (SCCC) */
- #define SSI_CACHE_PARAMS (0x000)
- /* CC attached to NONE-ACP such as HPP/ACE/AMBA4.
- * The customer is responsible to enable/disable this feature
- * according to his platform type.
- */
- #define DX_HAS_ACP 0
-#else
- #define SSI_CACHE_PARAMS (0xEEE)
- /* CC attached to ACP */
- #define DX_HAS_ACP 1
-#endif
-
#endif /*__DX_CONFIG_H__*/
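With DISABLE_COHERENT_DMA_OPS, DX_HAS_ACP and SSI_CACHE_PARAMS removed from ssi_config.h, the AXIM cache parameters are now selected at runtime from drvdata->coherent. A sketch of what the replacement could look like, assuming the values carry over from the removed defines (0xEEE for a coherent, ACP-attached interconnect; 0x000 for Software Controlled Cache Coherency); the CC_COHERENT_CACHE_PARAMS name matches the one used by init_cc_regs() below, but its value and location here are assumptions:

/* Assumed to mirror the removed SSI_CACHE_PARAMS values; most likely
 * defined in a driver header rather than in ssi_config.h.
 */
#define CC_COHERENT_CACHE_PARAMS	0xEEE

/* Illustrative helper: pick the cache parameters from the runtime flag. */
static inline u32 cc_cache_params(bool coherent)
{
	return coherent ? CC_COHERENT_CACHE_PARAMS : 0x000;
}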
#include <linux/random.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/of_address.h>
#include "ssi_config.h"
#include "ssi_driver.h"
int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
{
- unsigned int val;
+ unsigned int val, cache_params;
void __iomem *cc_base = drvdata->cc_base;
/* Unmask all AXI interrupt sources AXI_CFG1 register */
}
#endif
+ cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
+
val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
if (is_probe == true) {
SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
}
- CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS), SSI_CACHE_PARAMS);
+ CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
+ cache_params);
val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
if (is_probe == true) {
- SSI_LOG_INFO("Cache params current: 0x%08X (expected: 0x%08X)\n", val, SSI_CACHE_PARAMS);
+ SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
+ val, cache_params);
}
return 0;
}
new_drvdata->clk = of_clk_get(np, 0);
+ new_drvdata->coherent = of_dma_is_coherent(np);
/* Initialize the inflight counter used in dx_ablkcipher_secure_complete to count BYPASS block operations */
new_drvdata->inflight_counter = 0;
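The coherency flag is now probed from the device tree: of_dma_is_coherent() (declared in <linux/of_address.h>, hence the new include) returns true when the node, or an ancestor it inherits DMA properties from, carries the standard "dma-coherent" property. A minimal sketch of that detection, assuming the probe path has a valid of_node; the helper name is illustrative only:

#include <linux/of_address.h>
#include <linux/platform_device.h>

/* Illustrative only: boards that mark the CryptoCell node "dma-coherent"
 * in their device tree get the coherent cache parameters without a
 * driver rebuild; all others fall back to the non-coherent path.
 */
static bool cc_detect_coherency(struct platform_device *plat_dev)
{
	return of_dma_is_coherent(plat_dev->dev.of_node);
}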