Subject: [PATCH] crypto: cesa: allocate engines with main struct
From: Rosen Penev
Date: 2026-04-25  2:32 UTC
To: linux-crypto
Cc: Srujana Challa, Bharat Bhushan, Herbert Xu, David S. Miller,
	open list

Use a flexible array member so the engines array is allocated together
with the main struct, folding two allocations into one.

Move struct mv_cesa_dev below struct mv_cesa_engine, as a flexible array
member requires the complete definition of its element type.

Signed-off-by: Rosen Penev <rosenp@gmail.com>
---
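Notes:

A minimal userspace sketch of the allocation pattern (illustrative names
only, not the kernel code). In the kernel, struct_size() from
<linux/overflow.h> performs this size computation with overflow checking:

#include <stdlib.h>
#include <stddef.h>

struct engine { int id; };

struct dev {
	size_t nengines;
	struct engine engines[];	/* flexible array member, must be last */
};

static struct dev *dev_alloc(size_t n)
{
	/* One allocation covers the struct and its trailing array;
	 * struct_size(d, engines, n) would compute this size in kernel
	 * code, saturating instead of overflowing. */
	struct dev *d = calloc(1, sizeof(*d) + n * sizeof(d->engines[0]));

	if (d)
		d->nengines = n;
	return d;
}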
 drivers/crypto/marvell/cesa/cesa.c | 11 +++-----
 drivers/crypto/marvell/cesa/cesa.h | 42 +++++++++++++++---------------
 2 files changed, 25 insertions(+), 28 deletions(-)
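For completeness, why the definition had to move: a pointer member only
needs a forward declaration, but a flexible array member needs the
complete element type. Again an illustrative sketch, not the kernel code:

struct engine;			/* forward declaration: incomplete type */

struct with_pointer {
	struct engine *engines;	/* OK: pointer to an incomplete type */
};

#if 0
struct too_early {
	int nengines;
	struct engine engines[];	/* error: incomplete element type */
};
#endif

struct engine { int id; };	/* complete definition */

struct with_fam {
	int nengines;
	struct engine engines[];	/* OK: element type is now complete */
};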

diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8afa3a87e38d..687ed730174d 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -416,7 +416,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 	const struct mbus_dram_target_info *dram;
 	struct device *dev = &pdev->dev;
 	struct mv_cesa_dev *cesa;
-	struct mv_cesa_engine *engines;
+	struct mv_cesa_engine *engine;
 	int irq, ret, i, cpu;
 	u32 sram_size;
 
@@ -431,7 +431,8 @@ static int mv_cesa_probe(struct platform_device *pdev)
 			return -ENOTSUPP;
 	}
 
-	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
+	cesa = devm_kzalloc(dev, struct_size(cesa, engines, caps->nengines),
+			    GFP_KERNEL);
 	if (!cesa)
 		return -ENOMEM;
 
@@ -445,10 +446,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
 		sram_size = CESA_SA_MIN_SRAM_SIZE;
 
 	cesa->sram_size = sram_size;
-	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
-				     GFP_KERNEL);
-	if (!cesa->engines)
-		return -ENOMEM;
 
 	spin_lock_init(&cesa->lock);
 
@@ -465,7 +462,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, cesa);
 
 	for (i = 0; i < caps->nengines; i++) {
-		struct mv_cesa_engine *engine = &cesa->engines[i];
 		char res_name[16];
 
+		engine = &cesa->engines[i];
 		engine->id = i;
diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index 50ca1039fdaa..18f9f28040a6 100644
--- a/drivers/crypto/marvell/cesa/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -402,27 +402,6 @@ struct mv_cesa_dev_dma {
 	struct dma_pool *padding_pool;
 };
 
-/**
- * struct mv_cesa_dev - CESA device
- * @caps:	device capabilities
- * @regs:	device registers
- * @sram_size:	usable SRAM size
- * @lock:	device lock
- * @engines:	array of engines
- * @dma:	dma pools
- *
- * Structure storing CESA device information.
- */
-struct mv_cesa_dev {
-	const struct mv_cesa_caps *caps;
-	void __iomem *regs;
-	struct device *dev;
-	unsigned int sram_size;
-	spinlock_t lock;
-	struct mv_cesa_engine *engines;
-	struct mv_cesa_dev_dma *dma;
-};
-
 /**
  * struct mv_cesa_engine - CESA engine
  * @id:			engine id
@@ -471,6 +450,27 @@ struct mv_cesa_engine {
 	int irq;
 };
 
+/**
+ * struct mv_cesa_dev - CESA device
+ * @caps:	device capabilities
+ * @regs:	device registers
+ * @sram_size:	usable SRAM size
+ * @lock:	device lock
+ * @dma:	dma pools
+ * @engines:	array of engines
+ *
+ * Structure storing CESA device information.
+ */
+struct mv_cesa_dev {
+	const struct mv_cesa_caps *caps;
+	void __iomem *regs;
+	struct device *dev;
+	unsigned int sram_size;
+	spinlock_t lock;
+	struct mv_cesa_dev_dma *dma;
+	struct mv_cesa_engine engines[];
+};
+
 /**
  * struct mv_cesa_req_ops - CESA request operations
  * @process:	process a request chunk result (should return 0 if the
-- 
2.54.0

