wlan-ap-Telecominfraproject/feeds/qca-wifi-7/ipq53xx/patches-6.1/0590-crypto-qce-Add-command-descriptor-support-for-skciph.patch
John Crispin 68cf54d9f7 qca-wifi-7: update to ath12.5.5
Signed-off-by: John Crispin <john@phrozen.org>
2025-02-27 12:45:52 +01:00

From e46e7d80cfd358900ad0fb15e90fb71fc1f7c90f Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Wed, 1 Nov 2023 16:25:28 +0530
Subject: [PATCH] crypto: qce - Add command descriptor support for skcipher

Add command descriptor support for skcipher. With command descriptor
support, all register reads and writes for a skcipher request go via
BAM.
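
Illustrative call flow on the command descriptor path (sketch only,
using the helpers this patch adds; not part of the diff below):

    qce_read_dma_get_lock(qce);             /* async_req_handle: lock the BAM pipe */
    qce_setup_regs_skcipher_dma(async_req); /* qce_start: queue register writes
                                               plus GO, submit as one descriptor */
    /* ... BAM executes the transaction, completion callback fires ... */
    qce_check_status(qce, &status);         /* done path: read status, then
                                               unlock via qce_unlock_reg_dma() */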
Change-Id: Id474b90e4786f9066132ad771bd53bb72f6f2224
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
---
drivers/crypto/qce/common.c   | 170 +++++++++++++++++++++++++++++++++-
drivers/crypto/qce/core.h     |   6 +-
drivers/crypto/qce/skcipher.c |  17 ++++
3 files changed, 188 insertions(+), 5 deletions(-)
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 63cbf42a643b..7b048c35280e 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -27,6 +27,15 @@ static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
writel(val, qce->base + offset);
}
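+/* Queue one register write per array element into the current BAM
+ * command descriptor; nothing is written to hardware until the
+ * descriptor is submitted.
+ */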
+static inline void qce_write_array_dma(struct qce_device *qce, u32 offset,
+ const u32 *val, unsigned int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ qce_write_reg_dma(qce, offset + i * sizeof(u32), val[i], 1);
+}
+
static inline void qce_write_array(struct qce_device *qce, u32 offset,
const u32 *val, unsigned int len)
{
@@ -77,6 +86,18 @@ void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
}
}
+static void qce_setup_config_dma(struct qce_device *qce)
+{
+ u32 config;
+
+ /* get big endianness */
+ config = qce_config_reg(qce, 0);
+
+ /* clear status */
+ qce_write_reg_dma(qce, REG_STATUS, 0, 1);
+ qce_write_reg_dma(qce, REG_CONFIG, config, 1);
+}
+
static void qce_setup_config(struct qce_device *qce)
{
u32 config;
@@ -294,6 +315,22 @@ static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
+static void qce_xtskey_dma(struct qce_device *qce, const u8 *enckey,
+ unsigned int enckeylen, unsigned int cryptlen)
+{
+ u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+ unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+
+ qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+ enckeylen / 2);
+ qce_write_array_dma(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+ /* Set the data unit size to cryptlen. Anything else causes the
+ * crypto engine to return incorrect results.
+ */
+ qce_write_reg_dma(qce, REG_ENCR_XTS_DU_SIZE, cryptlen, 1);
+}
+
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
unsigned int enckeylen, unsigned int cryptlen)
{
@@ -310,6 +347,97 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}
+static int qce_setup_regs_skcipher_dma(struct crypto_async_request *async_req)
+{
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+ __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+ unsigned int enckey_words, enciv_words;
+ unsigned int keylen;
+ u32 encr_cfg = 0, config;
+ unsigned int ivsize = rctx->ivsize;
+ unsigned long flags = rctx->flags;
+ int ret;
+
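+ /* On the command descriptor path, the qce_write_reg_dma() calls
+ * below do not touch hardware directly; they queue register writes
+ * into a BAM command descriptor that qce_submit_cmd_desc() executes
+ * at the end of this function.
+ */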
+ qce_clear_bam_transaction(qce);
+ qce_setup_config_dma(qce);
+
+ if (IS_XTS(flags))
+ keylen = ctx->enc_keylen / 2;
+ else
+ keylen = ctx->enc_keylen;
+
+ qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+ enckey_words = keylen / sizeof(u32);
+
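+ /* Plain skcipher has no auth stage; clear the auth segment config */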
+ qce_write_reg_dma(qce, REG_AUTH_SEG_CFG, 0, 1);
+
+ encr_cfg = qce_encr_cfg(flags, keylen);
+
+ if (IS_DES(flags)) {
+ enciv_words = 2;
+ enckey_words = 2;
+ } else if (IS_3DES(flags)) {
+ enciv_words = 2;
+ enckey_words = 6;
+ } else if (IS_AES(flags)) {
+ if (IS_XTS(flags))
+ qce_xtskey_dma(qce, ctx->enc_key, ctx->enc_keylen,
+ rctx->cryptlen);
+ enciv_words = 4;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!qce->use_fixed_key)
+ qce_write_array_dma(qce, REG_ENCR_KEY0, (u32 *)enckey,
+ enckey_words);
+
+ if (!IS_ECB(flags)) {
+ if (IS_XTS(flags))
+ qce_xts_swapiv(enciv, rctx->iv, ivsize);
+ else
+ qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+ qce_write_array_dma(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+ }
+
+ if (IS_ENCRYPT(flags))
+ encr_cfg |= BIT(ENCODE_SHIFT);
+
+ qce_write_reg_dma(qce, REG_ENCR_SEG_CFG, encr_cfg, 1);
+ qce_write_reg_dma(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen, 1);
+ qce_write_reg_dma(qce, REG_ENCR_SEG_START, 0, 1);
+
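+ /* CTR mode: program a full-width counter mask, as on the non-DMA path */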
+ if (IS_CTR(flags)) {
+ qce_write_reg_dma(qce, REG_CNTR_MASK, ~0, 1);
+ qce_write_reg_dma(qce, REG_CNTR_MASK0, ~0, 1);
+ qce_write_reg_dma(qce, REG_CNTR_MASK1, ~0, 1);
+ qce_write_reg_dma(qce, REG_CNTR_MASK2, ~0, 1);
+ }
+
+ qce_write_reg_dma(qce, REG_SEG_SIZE, rctx->cryptlen, 1);
+
+ /* get little endianness */
+ config = qce_config_reg(qce, 1);
+ qce_write_reg_dma(qce, REG_CONFIG, config, 1);
+
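+ /* Queue the final GO (with results dump) and hand the whole
+ * descriptor to BAM as a single transaction.
+ */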
+ qce_write_reg_dma(qce, REG_GOPROC, BIT(GO_SHIFT) |
+ BIT(RESULTS_DUMP_SHIFT), 1);
+
+ ret = qce_submit_cmd_desc(qce, 0);
+ if (ret) {
+ dev_err(qce->dev, "Error in submitting cmd descriptor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
struct skcipher_request *req = skcipher_request_cast(async_req);
@@ -393,6 +521,40 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
}
#endif
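+/* Queue a dummy REG_STATUS read and submit it with the LOCK flag set,
+ * taking the BAM pipe lock for the duration of the request.
+ */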
+int qce_read_dma_get_lock(struct qce_device *qce)
+{
+ int ret;
+ u32 val = 0;
+
+ qce_clear_bam_transaction(qce);
+ qce_read_reg_dma(qce, REG_STATUS, &val, 1);
+
+ ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_LOCK);
+ if (ret) {
+ dev_err(qce->dev, "Error in submitting cmd descriptor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int qce_unlock_reg_dma(struct qce_device *qce)
+{
+ int ret;
+ u32 val = 0;
+
+ qce_clear_bam_transaction(qce);
+ qce_read_reg_dma(qce, REG_STATUS, &val, 1);
+
+ ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_UNLOCK);
+ if (ret) {
+ dev_err(qce->dev, "Error in submitting cmd descriptor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
@@ -547,7 +709,10 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
case CRYPTO_ALG_TYPE_SKCIPHER:
- return qce_setup_regs_skcipher(async_req);
+ if (qce->qce_cmd_desc_enable)
+ return qce_setup_regs_skcipher_dma(async_req);
+ else
+ return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
case CRYPTO_ALG_TYPE_AHASH:
@@ -571,6 +736,9 @@ int qce_check_status(struct qce_device *qce, u32 *status)
*status = qce_read(qce, REG_STATUS);
+ /* Unlock the crypto pipe here */
+ if (qce->qce_cmd_desc_enable)
+ qce_unlock_reg_dma(qce);
/*
* Don't use result dump status. The operation may not be complete.
* Instead, use the status we just read from device. In case, we need to
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
index f00975c9f926..3d9d88afcabc 100644
--- a/drivers/crypto/qce/core.h
+++ b/drivers/crypto/qce/core.h
@@ -117,12 +117,10 @@ struct qce_algo_ops {
int qce_write_reg_dma(struct qce_device *qce, unsigned int offset, u32 val,
int cnt);
-
int qce_read_reg_dma(struct qce_device *qce, unsigned int offset, void *buff,
int cnt);
-
void qce_clear_bam_transaction(struct qce_device *qce);
-
int qce_submit_cmd_desc(struct qce_device *qce, unsigned long flags);
-
+int qce_read_dma_get_lock(struct qce_device *qce);
+int qce_unlock_reg_dma(struct qce_device *qce);
#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index c733413ab8aa..35b2f9b07bd1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -41,6 +41,7 @@ static void qce_skcipher_done(void *data)
struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
struct qce_result_dump *result_buf = qce->dma.result_buf;
+ struct qce_bam_transaction *qce_bam_txn = qce->dma.qce_bam_txn;
enum dma_data_direction dir_src, dir_dst;
u32 status;
int error;
@@ -61,6 +62,19 @@ static void qce_skcipher_done(void *data)
sg_free_table(&rctx->dst_tbl);
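+ /* Command descriptor path: unmap the scatterlists used for the
+ * register read/write descriptors of this transaction.
+ */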
+ if (qce->qce_cmd_desc_enable) {
+ if (qce_bam_txn->qce_read_sgl_cnt)
+ dma_unmap_sg(qce->dev,
+ qce_bam_txn->qce_reg_read_sgl,
+ qce_bam_txn->qce_read_sgl_cnt,
+ DMA_FROM_DEVICE);
+ if (qce_bam_txn->qce_write_sgl_cnt)
+ dma_unmap_sg(qce->dev,
+ qce_bam_txn->qce_reg_write_sgl,
+ qce_bam_txn->qce_write_sgl_cnt,
+ DMA_TO_DEVICE);
+ }
+
error = qce_check_status(qce, &status);
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
@@ -91,6 +105,9 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
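+ /* Grab the BAM pipe lock before building this request's command
+ * descriptors (released in qce_check_status() once done).
+ */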
+ if (qce->qce_cmd_desc_enable)
+ qce_read_dma_get_lock(qce);
+
rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
--
2.34.1