diff --git a/arch/arm64/configs/xilinx_defconfig b/arch/arm64/configs/xilinx_defconfig
index cc03a55d7414cd..5723b68a8c6d76 100644
--- a/arch/arm64/configs/xilinx_defconfig
+++ b/arch/arm64/configs/xilinx_defconfig
@@ -453,7 +453,6 @@ CONFIG_NLS_ISO8859_1=y
 CONFIG_CRYPTO_CRCT10DIF=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_USER_API_SKCIPHER=y
-CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
 CONFIG_CRYPTO_DEV_XILINX_RSA=y
 CONFIG_CRYPTO_DEV_ZYNQMP_AES_SKCIPHER=y
 # CONFIG_XZ_DEC_X86 is not set
diff --git a/arch/arm64/configs/xilinx_zynqmp_defconfig b/arch/arm64/configs/xilinx_zynqmp_defconfig
index 83ab6fdebf737d..0fd9de12f412c7 100644
--- a/arch/arm64/configs/xilinx_zynqmp_defconfig
+++ b/arch/arm64/configs/xilinx_zynqmp_defconfig
@@ -409,7 +409,6 @@ CONFIG_NLS_ISO8859_1=y
 CONFIG_CRYPTO_CRCT10DIF=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_USER_API_SKCIPHER=y
-CONFIG_CRYPTO_DEV_ZYNQMP_SHA3=y
 CONFIG_CRYPTO_DEV_XILINX_RSA=y
 CONFIG_CRYPTO_DEV_ZYNQMP_AES_SKCIPHER=y
 CONFIG_DMA_CMA=y
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 531aba860a5711..c04a2a8c6a6c7f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -785,7 +785,19 @@ config CRYPTO_DEV_ROCKCHIP
	  Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
 
 config CRYPTO_DEV_ZYNQMP_SHA3
-	tristate "Support for Xilinx ZynqMP SHA3 hw accelerator"
+	tristate "Support for Xilinx ZynqMP SHA3 hardware accelerator"
+	depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+	depends on !CRYPTO_DEV_ZYNQMP_KECCAK_384
+	select CRYPTO_SHA3
+	help
+	  Xilinx ZynqMP has a SHA3 engine used for secure hash calculation.
+	  This driver interfaces with the SHA3 hardware engine.
+	  Select this if you want to use the ZynqMP module
+	  for SHA3 hash computation.
+
+config CRYPTO_DEV_ZYNQMP_KECCAK_384
+	bool "Support for Xilinx ZynqMP SHA3 hw accelerator (ahash)"
+	default y
 	depends on ARCH_ZYNQMP
 	select CRYPTO_HASH
 	help
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index ff688816f7d92c..6fa808c78aa4d1 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
 obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES_SKCIPHER) += zynqmp-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_KECCAK_384) += zynqmp-sha-deprecated.o
 obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
 obj-$(CONFIG_CRYPTO_DEV_XILINX_RSA) += zynqmp-rsa.o
diff --git a/drivers/crypto/xilinx/zynqmp-sha-deprecated.c b/drivers/crypto/xilinx/zynqmp-sha-deprecated.c
new file mode 100644
index 00000000000000..b0cc54579442ca
--- /dev/null
+++ b/drivers/crypto/xilinx/zynqmp-sha-deprecated.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define ZYNQMP_SHA3_INIT 1
+#define ZYNQMP_SHA3_UPDATE 2
+#define ZYNQMP_SHA3_FINAL 4
+
+#define ZYNQMP_SHA_QUEUE_LENGTH 1
+
+static struct zynqmp_sha_dev *sha_dd;
+
+/*
+ * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct zynqmp_sha_reqctx {
+	struct zynqmp_sha_dev *dd;
+	unsigned long flags;
+};
+
+struct zynqmp_sha_ctx {
+	struct zynqmp_sha_dev *dd;
+	unsigned long flags;
+};
+
+struct zynqmp_sha_dev {
+	struct list_head list;
+	struct device *dev;
+	/* the lock protects queue and dev list */
+	spinlock_t lock;
+	int err;
+
+	unsigned long flags;
+	struct crypto_queue queue;
+	struct ahash_request *req;
+};
+
+struct zynqmp_sha_drv {
+	struct list_head dev_list;
+	/* the lock protects queue and dev list */
+	spinlock_t lock;
+	/* the hw_engine_mutex makes the driver thread-safe */
+	struct mutex hw_engine_mutex;
+};
+
+static struct zynqmp_sha_drv zynqmp_sha = {
+	.dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
+	.lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
+};
+
+static int zynqmp_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct zynqmp_sha_dev *dd = sha_dd;
+	int ret;
+
+	spin_lock_bh(&zynqmp_sha.lock);
+	if (!tctx->dd)
+		tctx->dd = dd;
+	else
+		dd = tctx->dd;
+
+	spin_unlock_bh(&zynqmp_sha.lock);
+
+	ctx->dd = dd;
+	dev_dbg(dd->dev, "init: digest size: %d\n",
+		crypto_ahash_digestsize(tfm));
+
+	ret = mutex_lock_interruptible(&zynqmp_sha.hw_engine_mutex);
+	if (ret)
+		goto end;
+
+	ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+
+end:
+	return ret;
+}
+
+static int zynqmp_sha_update(struct ahash_request *req)
+{
+	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct zynqmp_sha_dev *dd = tctx->dd;
+	char *kbuf;
+	size_t dma_size = req->nbytes;
+	dma_addr_t dma_addr;
+	int ret;
+
+	if (!req->nbytes)
+		return 0;
+
+	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
+	caches_clean_inval_user_pou((unsigned long)kbuf,
+				    (unsigned long)kbuf + dma_size);
+	ret = zynqmp_pm_sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
+	if (ret) {
+		mutex_unlock(&zynqmp_sha.hw_engine_mutex);
+		goto end;
+	}
+
+	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+end:
+	return ret;
+}
+
+static int zynqmp_sha_final(struct ahash_request *req)
+{
+	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+	struct zynqmp_sha_dev *dd = tctx->dd;
+	char *kbuf;
+	size_t dma_size = SHA384_DIGEST_SIZE;
+	dma_addr_t dma_addr;
+	int ret;
+
+	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = zynqmp_pm_sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
+	memcpy(req->result, kbuf, 48);
+	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+
+	mutex_unlock(&zynqmp_sha.hw_engine_mutex);
+	return ret;
+}
+
+static int zynqmp_sha_finup(struct ahash_request *req)
+{
+	zynqmp_sha_update(req);
+	zynqmp_sha_final(req);
+
+	return 0;
+}
+
+static int zynqmp_sha_digest(struct ahash_request *req)
+{
+	zynqmp_sha_init(req);
+	zynqmp_sha_update(req);
+	zynqmp_sha_final(req);
+
+	return 0;
+}
+
+static int zynqmp_sha_export(struct ahash_request *req, void *out)
+{
+	const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(*ctx));
+	return 0;
+}
+
+static int zynqmp_sha_import(struct ahash_request *req, const void *in)
+{
+	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(*ctx));
+	return 0;
+}
+
+static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct zynqmp_sha_reqctx));
+
+	return 0;
+}
+
+static struct ahash_alg sha3_alg = {
+	.init = zynqmp_sha_init,
+	.update = zynqmp_sha_update,
+	.final = zynqmp_sha_final,
+	.finup = zynqmp_sha_finup,
+	.digest = zynqmp_sha_digest,
+	.export = zynqmp_sha_export,
+	.import = zynqmp_sha_import,
+	.halg = {
+		.digestsize = SHA384_DIGEST_SIZE,
+		.statesize = sizeof(struct sha256_state),
+		.base = {
+			.cra_name = "xilinx-keccak-384",
+			.cra_driver_name = "zynqmp-keccak-384",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_ASYNC,
+			.cra_blocksize = SHA384_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
+			.cra_alignmask = 0,
+			.cra_module = THIS_MODULE,
+			.cra_init = zynqmp_sha_cra_init,
+		}
+	}
+};
+
+static const struct of_device_id zynqmp_sha_dt_ids[] = {
+	{ .compatible = "xlnx,zynqmp-keccak-384" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);
+
+static int zynqmp_sha_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int err;
+
+	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+	if (!sha_dd)
+		return -ENOMEM;
+
+	sha_dd->dev = dev;
+	platform_set_drvdata(pdev, sha_dd);
+	INIT_LIST_HEAD(&sha_dd->list);
+	spin_lock_init(&sha_dd->lock);
+	mutex_init(&zynqmp_sha.hw_engine_mutex);
+	crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
+	spin_lock(&zynqmp_sha.lock);
+	list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
+	spin_unlock(&zynqmp_sha.lock);
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err < 0)
+		dev_err(dev, "no usable DMA configuration");
+
+	err = crypto_register_ahash(&sha3_alg);
+	if (err)
+		goto err_algs;
+
+	return 0;
+
+err_algs:
+	spin_lock(&zynqmp_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&zynqmp_sha.lock);
+	dev_err(dev, "initialization failed.\n");
+
+	return err;
+}
+
+static int zynqmp_sha_remove(struct platform_device *pdev)
+{
+	sha_dd = platform_get_drvdata(pdev);
+
+	if (!sha_dd)
+		return -ENODEV;
+
+	spin_lock(&zynqmp_sha.lock);
+	list_del(&sha_dd->list);
+	spin_unlock(&zynqmp_sha.lock);
+
+	crypto_unregister_ahash(&sha3_alg);
+
+	return 0;
+}
+
+static struct platform_driver zynqmp_sha_driver = {
+	.probe = zynqmp_sha_probe,
+	.remove = zynqmp_sha_remove,
+	.driver = {
+		.name = "zynqmp-keccak-384",
+		.of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
+	},
+};
+
+module_platform_driver(zynqmp_sha_driver);
+
+MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nava kishore Manne ");
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index b0cc54579442ca..2caa8b92a3a010 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -1,295 +1,264 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2017 Xilinx, Inc.
+ * Xilinx ZynqMP SHA Driver.
+ * Copyright (c) 2022 Xilinx Inc.
  */
-
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
+#include
 #include
-#include
-
-#define ZYNQMP_SHA3_INIT 1
-#define ZYNQMP_SHA3_UPDATE 2
-#define ZYNQMP_SHA3_FINAL 4
-
-#define ZYNQMP_SHA_QUEUE_LENGTH 1
+#include
+#include
+#include
+#include
+#include
+#include
 
-static struct zynqmp_sha_dev *sha_dd;
+#define ZYNQMP_DMA_BIT_MASK 32U
+#define ZYNQMP_DMA_ALLOC_FIXED_SIZE 0x1000U
 
-/*
- * .statesize = sizeof(struct zynqmp_sha_reqctx) must be <= PAGE_SIZE / 8 as
- * tested by the ahash_prepare_alg() function.
- */
-struct zynqmp_sha_reqctx {
-	struct zynqmp_sha_dev *dd;
-	unsigned long flags;
+enum zynqmp_sha_op {
+	ZYNQMP_SHA3_INIT = 1,
+	ZYNQMP_SHA3_UPDATE = 2,
+	ZYNQMP_SHA3_FINAL = 4,
 };
 
-struct zynqmp_sha_ctx {
-	struct zynqmp_sha_dev *dd;
-	unsigned long flags;
+struct zynqmp_sha_drv_ctx {
+	struct shash_alg sha3_384;
+	struct device *dev;
 };
 
-struct zynqmp_sha_dev {
-	struct list_head list;
-	struct device *dev;
-	/* the lock protects queue and dev list */
-	spinlock_t lock;
-	int err;
-
-	unsigned long flags;
-	struct crypto_queue queue;
-	struct ahash_request *req;
+struct zynqmp_sha_tfm_ctx {
+	struct device *dev;
+	struct crypto_shash *fbk_tfm;
 };
 
-struct zynqmp_sha_drv {
-	struct list_head dev_list;
-	/* the lock protects queue and dev list */
-	spinlock_t lock;
-	/* the hw_engine_mutex makes the driver thread-safe */
-	struct mutex hw_engine_mutex;
+struct zynqmp_sha_desc_ctx {
+	struct shash_desc fbk_req;
 };
 
-static struct zynqmp_sha_drv zynqmp_sha = {
-	.dev_list = LIST_HEAD_INIT(zynqmp_sha.dev_list),
-	.lock = __SPIN_LOCK_UNLOCKED(zynqmp_sha.lock),
-};
+static dma_addr_t update_dma_addr, final_dma_addr;
+static char *ubuf, *fbuf;
 
-static int zynqmp_sha_init(struct ahash_request *req)
+static int zynqmp_sha_init_tfm(struct crypto_shash *hash)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct zynqmp_sha_ctx *tctx = crypto_ahash_ctx(tfm);
-	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
-	struct zynqmp_sha_dev *dd = sha_dd;
-	int ret;
-
-	spin_lock_bh(&zynqmp_sha.lock);
-	if (!tctx->dd)
-		tctx->dd = dd;
-	else
-		dd = tctx->dd;
+	const char *fallback_driver_name = crypto_shash_alg_name(hash);
+	struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
+	struct shash_alg *alg = crypto_shash_alg(hash);
+	struct crypto_shash *fallback_tfm;
+	struct zynqmp_sha_drv_ctx *drv_ctx;
 
-	spin_unlock_bh(&zynqmp_sha.lock);
+	drv_ctx = container_of(alg, struct zynqmp_sha_drv_ctx, sha3_384);
+	tfm_ctx->dev = drv_ctx->dev;
 
-	ctx->dd = dd;
-	dev_dbg(dd->dev, "init: digest size: %d\n",
-		crypto_ahash_digestsize(tfm));
+	/* Allocate a fallback and abort if it failed. */
+	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
+					  CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(fallback_tfm))
+		return PTR_ERR(fallback_tfm);
 
-	ret = mutex_lock_interruptible(&zynqmp_sha.hw_engine_mutex);
-	if (ret)
-		goto end;
-
-	ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+	tfm_ctx->fbk_tfm = fallback_tfm;
+	hash->descsize += crypto_shash_descsize(tfm_ctx->fbk_tfm);
 
-end:
-	return ret;
+	return 0;
 }
 
-static int zynqmp_sha_update(struct ahash_request *req)
+static void zynqmp_sha_exit_tfm(struct crypto_shash *hash)
 {
-	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-	struct zynqmp_sha_dev *dd = tctx->dd;
-	char *kbuf;
-	size_t dma_size = req->nbytes;
-	dma_addr_t dma_addr;
-	int ret;
-
-	if (!req->nbytes)
-		return 0;
+	struct zynqmp_sha_tfm_ctx *tfm_ctx = crypto_shash_ctx(hash);
 
-	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
-	if (!kbuf)
-		return -ENOMEM;
-
-	scatterwalk_map_and_copy(kbuf, req->src, 0, req->nbytes, 0);
-	caches_clean_inval_user_pou((unsigned long)kbuf,
-				    (unsigned long)kbuf + dma_size);
-	ret = zynqmp_pm_sha_hash(dma_addr, req->nbytes, ZYNQMP_SHA3_UPDATE);
-	if (ret) {
-		mutex_unlock(&zynqmp_sha.hw_engine_mutex);
-		goto end;
+	if (tfm_ctx->fbk_tfm) {
+		crypto_free_shash(tfm_ctx->fbk_tfm);
+		tfm_ctx->fbk_tfm = NULL;
 	}
 
-	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
-
-end:
-	return ret;
+	memzero_explicit(tfm_ctx, sizeof(struct zynqmp_sha_tfm_ctx));
 }
 
-static int zynqmp_sha_final(struct ahash_request *req)
+static int zynqmp_sha_init(struct shash_desc *desc)
 {
-	struct zynqmp_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
-	struct zynqmp_sha_dev *dd = tctx->dd;
-	char *kbuf;
-	size_t dma_size = SHA384_DIGEST_SIZE;
-	dma_addr_t dma_addr;
-	int ret;
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+	struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 
-	kbuf = dma_alloc_coherent(dd->dev, dma_size, &dma_addr, GFP_KERNEL);
-	if (!kbuf)
-		return -ENOMEM;
+	dctx->fbk_req.tfm = tctx->fbk_tfm;
+	return crypto_shash_init(&dctx->fbk_req);
 }
 
-	ret = zynqmp_pm_sha_hash(dma_addr, dma_size, ZYNQMP_SHA3_FINAL);
-	memcpy(req->result, kbuf, 48);
-	dma_free_coherent(dd->dev, dma_size, kbuf, dma_addr);
+static int zynqmp_sha_update(struct shash_desc *desc, const u8 *data, unsigned int length)
+{
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	mutex_unlock(&zynqmp_sha.hw_engine_mutex);
-	return ret;
+	return crypto_shash_update(&dctx->fbk_req, data, length);
 }
 
-static int zynqmp_sha_finup(struct ahash_request *req)
+static int zynqmp_sha_final(struct shash_desc *desc, u8 *out)
 {
-	zynqmp_sha_update(req);
-	zynqmp_sha_final(req);
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	return 0;
+	return crypto_shash_final(&dctx->fbk_req, out);
 }
 
-static int zynqmp_sha_digest(struct ahash_request *req)
+static int zynqmp_sha_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out)
 {
-	zynqmp_sha_init(req);
-	zynqmp_sha_update(req);
-	zynqmp_sha_final(req);
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	return 0;
+	return crypto_shash_finup(&dctx->fbk_req, data, length, out);
 }
 
-static int zynqmp_sha_export(struct ahash_request *req, void *out)
+static int zynqmp_sha_import(struct shash_desc *desc, const void *in)
 {
-	const struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
+	struct zynqmp_sha_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
 
-	memcpy(out, ctx, sizeof(*ctx));
-	return 0;
+	dctx->fbk_req.tfm = tctx->fbk_tfm;
+	return crypto_shash_import(&dctx->fbk_req, in);
 }
 
-static int zynqmp_sha_import(struct ahash_request *req, const void *in)
+static int zynqmp_sha_export(struct shash_desc *desc, void *out)
 {
-	struct zynqmp_sha_reqctx *ctx = ahash_request_ctx(req);
+	struct zynqmp_sha_desc_ctx *dctx = shash_desc_ctx(desc);
 
-	memcpy(ctx, in, sizeof(*ctx));
-	return 0;
+	return crypto_shash_export(&dctx->fbk_req, out);
 }
 
-static int zynqmp_sha_cra_init(struct crypto_tfm *tfm)
+static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out)
 {
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct zynqmp_sha_reqctx));
+	unsigned int remaining_len = len;
+	int update_size;
+	int ret;
 
-	return 0;
+	ret = zynqmp_pm_sha_hash(0, 0, ZYNQMP_SHA3_INIT);
+	if (ret)
+		return ret;
+
+	while (remaining_len != 0) {
+		memzero_explicit(ubuf, ZYNQMP_DMA_ALLOC_FIXED_SIZE);
+		if (remaining_len >= ZYNQMP_DMA_ALLOC_FIXED_SIZE) {
+			update_size = ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+			remaining_len -= ZYNQMP_DMA_ALLOC_FIXED_SIZE;
+		} else {
+			update_size = remaining_len;
+			remaining_len = 0;
+		}
+		memcpy(ubuf, data, update_size);
+		flush_icache_range((unsigned long)ubuf, (unsigned long)ubuf + update_size);
+		ret = zynqmp_pm_sha_hash(update_dma_addr, update_size, ZYNQMP_SHA3_UPDATE);
+		if (ret)
+			return ret;
+
+		data += update_size;
+	}
+
+	ret = zynqmp_pm_sha_hash(final_dma_addr, SHA3_384_DIGEST_SIZE, ZYNQMP_SHA3_FINAL);
+	memcpy(out, fbuf, SHA3_384_DIGEST_SIZE);
+	memzero_explicit(fbuf, SHA3_384_DIGEST_SIZE);
+
+	return ret;
 }
 
-static struct ahash_alg sha3_alg = {
-	.init = zynqmp_sha_init,
-	.update = zynqmp_sha_update,
-	.final = zynqmp_sha_final,
-	.finup = zynqmp_sha_finup,
-	.digest = zynqmp_sha_digest,
-	.export = zynqmp_sha_export,
-	.import = zynqmp_sha_import,
-	.halg = {
-		.digestsize = SHA384_DIGEST_SIZE,
-		.statesize = sizeof(struct sha256_state),
-		.base = {
-			.cra_name = "xilinx-keccak-384",
-			.cra_driver_name = "zynqmp-keccak-384",
-			.cra_priority = 300,
-			.cra_flags = CRYPTO_ALG_ASYNC,
-			.cra_blocksize = SHA384_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct zynqmp_sha_ctx),
-			.cra_alignmask = 0,
-			.cra_module = THIS_MODULE,
-			.cra_init = zynqmp_sha_cra_init,
+static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
+	.sha3_384 = {
+		.init = zynqmp_sha_init,
+		.update = zynqmp_sha_update,
+		.final = zynqmp_sha_final,
+		.finup = zynqmp_sha_finup,
+		.digest = zynqmp_sha_digest,
+		.export = zynqmp_sha_export,
+		.import = zynqmp_sha_import,
+		.init_tfm = zynqmp_sha_init_tfm,
+		.exit_tfm = zynqmp_sha_exit_tfm,
+		.descsize = sizeof(struct zynqmp_sha_desc_ctx),
+		.statesize = sizeof(struct sha3_state),
+		.digestsize = SHA3_384_DIGEST_SIZE,
+		.base = {
+			.cra_name = "sha3-384",
+			.cra_driver_name = "zynqmp-sha3-384",
+			.cra_priority = 300,
+			.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+				     CRYPTO_ALG_ALLOCATES_MEMORY |
+				     CRYPTO_ALG_NEED_FALLBACK,
+			.cra_blocksize = SHA3_384_BLOCK_SIZE,
+			.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
+			.cra_alignmask = 3,
+			.cra_module = THIS_MODULE,
 		}
 	}
 };
 
-static const struct of_device_id zynqmp_sha_dt_ids[] = {
-	{ .compatible = "xlnx,zynqmp-keccak-384" },
-	{ /* sentinel */ }
-};
-
-MODULE_DEVICE_TABLE(of, zynqmp_sha_dt_ids);
-
 static int zynqmp_sha_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	int err;
+	u32 v;
 
-	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
-	if (!sha_dd)
-		return -ENOMEM;
-
-	sha_dd->dev = dev;
-	platform_set_drvdata(pdev, sha_dd);
-	INIT_LIST_HEAD(&sha_dd->list);
-	spin_lock_init(&sha_dd->lock);
-	mutex_init(&zynqmp_sha.hw_engine_mutex);
-	crypto_init_queue(&sha_dd->queue, ZYNQMP_SHA_QUEUE_LENGTH);
-	spin_lock(&zynqmp_sha.lock);
-	list_add_tail(&sha_dd->list, &zynqmp_sha.dev_list);
-	spin_unlock(&zynqmp_sha.lock);
-
-	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-	if (err < 0)
-		dev_err(dev, "no usable DMA configuration");
-
-	err = crypto_register_ahash(&sha3_alg);
+	/* Verify the hardware is present */
+	err = zynqmp_pm_get_api_version(&v);
 	if (err)
-		goto err_algs;
+		return err;
+
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(ZYNQMP_DMA_BIT_MASK));
+	if (err < 0) {
+		dev_err(dev, "No usable DMA configuration\n");
+		return err;
+	}
+
+	err = crypto_register_shash(&sha3_drv_ctx.sha3_384);
+	if (err < 0) {
+		dev_err(dev, "Failed to register shash alg.\n");
+		return err;
+	}
+
+	sha3_drv_ctx.dev = dev;
+	platform_set_drvdata(pdev, &sha3_drv_ctx);
+
+	ubuf = dma_alloc_coherent(dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, &update_dma_addr, GFP_KERNEL);
+	if (!ubuf) {
+		err = -ENOMEM;
+		goto err_shash;
+	}
+
+	fbuf = dma_alloc_coherent(dev, SHA3_384_DIGEST_SIZE, &final_dma_addr, GFP_KERNEL);
+	if (!fbuf) {
+		err = -ENOMEM;
+		goto err_mem;
+	}
 
 	return 0;
 
-err_algs:
-	spin_lock(&zynqmp_sha.lock);
-	list_del(&sha_dd->list);
-	spin_unlock(&zynqmp_sha.lock);
-	dev_err(dev, "initialization failed.\n");
+err_mem:
+	dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+
+err_shash:
+	crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
 
 	return err;
 }
 
 static int zynqmp_sha_remove(struct platform_device *pdev)
 {
-	sha_dd = platform_get_drvdata(pdev);
-
-	if (!sha_dd)
-		return -ENODEV;
+	sha3_drv_ctx.dev = platform_get_drvdata(pdev);
 
-	spin_lock(&zynqmp_sha.lock);
-	list_del(&sha_dd->list);
-	spin_unlock(&zynqmp_sha.lock);
-
-	crypto_unregister_ahash(&sha3_alg);
+	dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
+	dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
+	crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
 
 	return 0;
 }
 
 static struct platform_driver zynqmp_sha_driver = {
-	.probe = zynqmp_sha_probe,
-	.remove = zynqmp_sha_remove,
-	.driver = {
-		.name = "zynqmp-keccak-384",
-		.of_match_table = of_match_ptr(zynqmp_sha_dt_ids),
+	.probe = zynqmp_sha_probe,
+	.remove = zynqmp_sha_remove,
+	.driver = {
+		.name = "zynqmp-sha3-384",
 	},
 };
 
 module_platform_driver(zynqmp_sha_driver);
-
-MODULE_DESCRIPTION("ZynqMP SHA3 hw acceleration support.");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Nava kishore Manne ");
+MODULE_DESCRIPTION("ZynqMP SHA3 hardware acceleration support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Harsha ");
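
A usage note, not part of the patch: in the rewritten zynqmp-sha.c only the
one-shot .digest path drives the SHA3 engine through zynqmp_pm_sha_hash();
the init/update/final/finup and import/export callbacks are delegated to the
software "sha3-384" fallback allocated in zynqmp_sha_init_tfm(). Since both
defconfigs above keep CONFIG_CRYPTO_USER_API_HASH=y, the registered algorithm
can be exercised from userspace via AF_ALG. The sketch below is illustrative
only (assumed test program, error handling trimmed); with cra_priority 300,
"sha3-384" should resolve to "zynqmp-sha3-384" on ZynqMP hardware:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	/* Bind to the kernel's sha3-384 hash; the crypto core selects the
	 * highest-priority implementation for this cra_name. */
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha3-384",
	};
	unsigned char digest[48]; /* SHA3_384_DIGEST_SIZE */
	const char msg[] = "hello";
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0)
		return 1;

	/* One-shot hash: send the message, then read the digest back. */
	if (write(opfd, msg, strlen(msg)) != (ssize_t)strlen(msg))
		return 1;
	if (read(opfd, digest, sizeof(digest)) != sizeof(digest))
		return 1;

	for (unsigned int i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}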