* [PATCH 01/10] staging: ccree: remove inline qualifiers
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-04 9:36 ` Dan Carpenter
2017-12-03 13:58 ` [PATCH 02/10] staging: ccree: remove unproven likely/unlikely Gilad Ben-Yossef
` (9 subsequent siblings)
10 siblings, 1 reply; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC
To: Greg Kroah-Hartman
Cc: Ofir Drang, linux-kernel, linux-crypto, driverdev-devel, devel
The ccree driver was marking a lot of big functions in C files as
static inline for no good reason. Remove the inline qualifier from
all but the few truly single-line functions.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_aead.c | 40 ++++++++++++++++-----------------
drivers/staging/ccree/ssi_buffer_mgr.c | 35 ++++++++++++++---------------
drivers/staging/ccree/ssi_cipher.c | 6 ++---
drivers/staging/ccree/ssi_hash.c | 2 +-
drivers/staging/ccree/ssi_request_mgr.c | 4 ++--
5 files changed, 43 insertions(+), 44 deletions(-)
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 2cd2b0f..56a4059 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -733,7 +733,7 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
}
#endif /*SSI_CC_HAS_AES_CCM*/
-static inline void
+static void
ssi_aead_create_assoc_desc(
struct aead_request *areq,
unsigned int flow_mode,
@@ -776,7 +776,7 @@ ssi_aead_create_assoc_desc(
*seq_size = (++idx);
}
-static inline void
+static void
ssi_aead_process_authenc_data_desc(
struct aead_request *areq,
unsigned int flow_mode,
@@ -843,7 +843,7 @@ ssi_aead_process_authenc_data_desc(
*seq_size = (++idx);
}
-static inline void
+static void
ssi_aead_process_cipher_data_desc(
struct aead_request *areq,
unsigned int flow_mode,
@@ -891,7 +891,7 @@ ssi_aead_process_cipher_data_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_process_digest_result_desc(
+static void ssi_aead_process_digest_result_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -942,7 +942,7 @@ static inline void ssi_aead_process_digest_result_desc(
*seq_size = (++idx);
}
-static inline void ssi_aead_setup_cipher_desc(
+static void ssi_aead_setup_cipher_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -988,7 +988,7 @@ static inline void ssi_aead_setup_cipher_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_cipher(
+static void ssi_aead_process_cipher(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size,
@@ -1014,7 +1014,7 @@ static inline void ssi_aead_process_cipher(
*seq_size = idx;
}
-static inline void ssi_aead_hmac_setup_digest_desc(
+static void ssi_aead_hmac_setup_digest_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1051,7 +1051,7 @@ static inline void ssi_aead_hmac_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_xcbc_setup_digest_desc(
+static void ssi_aead_xcbc_setup_digest_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1113,7 +1113,7 @@ static inline void ssi_aead_xcbc_setup_digest_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_header_desc(
+static void ssi_aead_process_digest_header_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1127,7 +1127,7 @@ static inline void ssi_aead_process_digest_header_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_digest_scheme_desc(
+static void ssi_aead_process_digest_scheme_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1192,7 +1192,7 @@ static inline void ssi_aead_process_digest_scheme_desc(
*seq_size = idx;
}
-static inline void ssi_aead_load_mlli_to_sram(
+static void ssi_aead_load_mlli_to_sram(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1222,7 +1222,7 @@ static inline void ssi_aead_load_mlli_to_sram(
}
}
-static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
+static enum cc_flow_mode ssi_aead_get_data_flow_mode(
enum drv_crypto_direction direct,
enum cc_flow_mode setup_flow_mode,
bool is_single_pass)
@@ -1248,7 +1248,7 @@ static inline enum cc_flow_mode ssi_aead_get_data_flow_mode(
return data_flow_mode;
}
-static inline void ssi_aead_hmac_authenc(
+static void ssi_aead_hmac_authenc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1304,7 +1304,7 @@ static inline void ssi_aead_hmac_authenc(
}
}
-static inline void
+static void
ssi_aead_xcbc_authenc(
struct aead_request *req,
struct cc_hw_desc desc[],
@@ -1456,7 +1456,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
return 0;
}
-static inline int ssi_aead_ccm(
+static int ssi_aead_ccm(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1667,7 +1667,7 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)
#if SSI_CC_HAS_AES_GCM
-static inline void ssi_aead_gcm_setup_ghash_desc(
+static void ssi_aead_gcm_setup_ghash_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1747,7 +1747,7 @@ static inline void ssi_aead_gcm_setup_ghash_desc(
*seq_size = idx;
}
-static inline void ssi_aead_gcm_setup_gctr_desc(
+static void ssi_aead_gcm_setup_gctr_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1785,7 +1785,7 @@ static inline void ssi_aead_gcm_setup_gctr_desc(
*seq_size = idx;
}
-static inline void ssi_aead_process_gcm_result_desc(
+static void ssi_aead_process_gcm_result_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1851,7 +1851,7 @@ static inline void ssi_aead_process_gcm_result_desc(
*seq_size = idx;
}
-static inline int ssi_aead_gcm(
+static int ssi_aead_gcm(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1892,7 +1892,7 @@ static inline int ssi_aead_gcm(
}
#ifdef CC_DEBUG
-static inline void ssi_aead_dump_gcm(
+static void ssi_aead_dump_gcm(
const char *title,
struct aead_request *req)
{
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index c542225..dc45333 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -80,8 +80,8 @@ static inline char *cc_dma_buf_type(enum ssi_req_dma_buf_type type)
* @req: aead request object
* @dir: [IN] copy from/to sgl
*/
-static inline void cc_copy_mac(struct device *dev, struct aead_request *req,
- enum ssi_sg_cpy_direct dir)
+static void cc_copy_mac(struct device *dev, struct aead_request *req,
+ enum ssi_sg_cpy_direct dir)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -168,7 +168,7 @@ void cc_copy_sg_portion(
(direct == SSI_SG_TO_BUF));
}
-static inline int cc_render_buff_to_mlli(
+static int cc_render_buff_to_mlli(
struct device *dev, dma_addr_t buff_dma, u32 buff_size,
u32 *curr_nents, u32 **mlli_entry_pp)
{
@@ -204,7 +204,7 @@ static inline int cc_render_buff_to_mlli(
return 0;
}
-static inline int cc_render_sg_to_mlli(
+static int cc_render_sg_to_mlli(
struct device *dev, struct scatterlist *sgl,
u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents,
u32 **mlli_entry_pp)
@@ -293,7 +293,7 @@ static int cc_generate_mlli(
return rc;
}
-static inline void cc_add_buffer_entry(
+static void cc_add_buffer_entry(
struct device *dev, struct buffer_array *sgl_data,
dma_addr_t buffer_dma, unsigned int buffer_len,
bool is_last_entry, u32 *mlli_nents)
@@ -314,7 +314,7 @@ static inline void cc_add_buffer_entry(
sgl_data->num_of_buffers++;
}
-static inline void cc_add_sg_entry(
+static void cc_add_sg_entry(
struct device *dev,
struct buffer_array *sgl_data,
unsigned int nents,
@@ -425,7 +425,7 @@ static int cc_map_sg(
return 0;
}
-static inline int
+static int
ssi_aead_handle_config_buf(struct device *dev,
struct aead_req_ctx *areq_ctx,
u8 *config_data,
@@ -455,11 +455,10 @@ ssi_aead_handle_config_buf(struct device *dev,
return 0;
}
-static inline int ssi_ahash_handle_curr_buf(struct device *dev,
- struct ahash_req_ctx *areq_ctx,
- u8 *curr_buff,
- u32 curr_buff_cnt,
- struct buffer_array *sg_data)
+static int ssi_ahash_handle_curr_buf(struct device *dev,
+ struct ahash_req_ctx *areq_ctx,
+ u8 *curr_buff, u32 curr_buff_cnt,
+ struct buffer_array *sg_data)
{
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
@@ -710,7 +709,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
}
-static inline int cc_get_aead_icv_nents(
+static int cc_get_aead_icv_nents(
struct device *dev,
struct scatterlist *sgl,
unsigned int sgl_nents,
@@ -764,7 +763,7 @@ static inline int cc_get_aead_icv_nents(
return nents;
}
-static inline int cc_aead_chain_iv(
+static int cc_aead_chain_iv(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
@@ -809,7 +808,7 @@ static inline int cc_aead_chain_iv(
return rc;
}
-static inline int cc_aead_chain_assoc(
+static int cc_aead_chain_assoc(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
@@ -904,7 +903,7 @@ static inline int cc_aead_chain_assoc(
return rc;
}
-static inline void cc_prepare_aead_data_dlli(
+static void cc_prepare_aead_data_dlli(
struct aead_request *req,
u32 *src_last_bytes, u32 *dst_last_bytes)
{
@@ -940,7 +939,7 @@ static inline void cc_prepare_aead_data_dlli(
}
}
-static inline int cc_prepare_aead_data_mlli(
+static int cc_prepare_aead_data_mlli(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
@@ -1075,7 +1074,7 @@ static inline int cc_prepare_aead_data_mlli(
return rc;
}
-static inline int cc_aead_chain_data(
+static int cc_aead_chain_data(
struct ssi_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index f6e680c..6c4b93d 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -432,7 +432,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
return 0;
}
-static inline void
+static void
ssi_blkcipher_create_setup_desc(
struct crypto_tfm *tfm,
struct blkcipher_req_ctx *req_ctx,
@@ -562,7 +562,7 @@ ssi_blkcipher_create_setup_desc(
}
#if SSI_CC_HAS_MULTI2
-static inline void ssi_blkcipher_create_multi2_setup_desc(
+static void ssi_blkcipher_create_multi2_setup_desc(
struct crypto_tfm *tfm,
struct blkcipher_req_ctx *req_ctx,
unsigned int ivsize,
@@ -606,7 +606,7 @@ static inline void ssi_blkcipher_create_multi2_setup_desc(
}
#endif /*SSI_CC_HAS_MULTI2*/
-static inline void
+static void
ssi_blkcipher_create_data_desc(
struct crypto_tfm *tfm,
struct blkcipher_req_ctx *req_ctx,
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 4d79725..c955e50 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -114,7 +114,7 @@ static void ssi_hash_create_data_desc(
bool is_not_last_data,
unsigned int *seq_size);
-static inline void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
+static void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
if (unlikely(mode == DRV_HASH_MD5 ||
mode == DRV_HASH_SHA384 ||
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 0882efd..9883d14 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -165,7 +165,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
return rc;
}
-static inline void enqueue_seq(
+static void enqueue_seq(
void __iomem *cc_base,
struct cc_hw_desc seq[], unsigned int seq_len)
{
@@ -202,7 +202,7 @@ static void request_mgr_complete(struct device *dev, void *dx_compl_h)
complete(this_compl);
}
-static inline int request_mgr_queues_status_check(
+static int request_mgr_queues_status_check(
struct ssi_drvdata *drvdata,
struct ssi_request_mgr_handle *req_mgr_h,
unsigned int total_seq_len)
--
2.7.4
* Re: [PATCH 01/10] staging: ccree: remove inline qualifiers
2017-12-03 13:58 ` [PATCH 01/10] staging: ccree: remove inline qualifiers Gilad Ben-Yossef
@ 2017-12-04 9:36 ` Dan Carpenter
2017-12-07 7:00 ` Gilad Ben-Yossef
0 siblings, 1 reply; 16+ messages in thread
From: Dan Carpenter @ 2017-12-04 9:36 UTC
To: Gilad Ben-Yossef
Cc: Greg Kroah-Hartman, linux-crypto, devel, driverdev-devel,
linux-kernel, Ofir Drang
On Sun, Dec 03, 2017 at 01:58:12PM +0000, Gilad Ben-Yossef wrote:
> The ccree driver was marking a lot of big functions in C files as
> static inline for no good reason. Remove the inline qualifier from
> all but the few truly single-line functions.
>
The compiler is free to ignore inline hints... It probably would make
single-line functions inline anyway.
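(Purely for illustration, a minimal sketch rather than anything from
the patch: at -O2 GCC will inline a trivial static function on its
own, and it is just as free to emit a large "static inline" function
out of line.)

	/* hypothetical example, not from the driver */
	static int add_one(int x)
	{
		return x + 1;		/* inlined at -O2 without any keyword */
	}

	int caller(int x)
	{
		return add_one(x);	/* typically folds to a single add */
	}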
regards,
dan carpenter
* Re: [PATCH 01/10] staging: ccree: remove inline qualifiers
2017-12-04 9:36 ` Dan Carpenter
@ 2017-12-07 7:00 ` Gilad Ben-Yossef
2017-12-07 8:28 ` Dan Carpenter
0 siblings, 1 reply; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-07 7:00 UTC
To: Dan Carpenter
Cc: Greg Kroah-Hartman, Linux Crypto Mailing List, devel,
driverdev-devel, Linux kernel mailing list, Ofir Drang
On Mon, Dec 4, 2017 at 11:36 AM, Dan Carpenter <dan.carpenter@oracle.com> wrote:
> On Sun, Dec 03, 2017 at 01:58:12PM +0000, Gilad Ben-Yossef wrote:
>> The ccree driver was marking a lot of big functions in C files as
>> static inline for no good reason. Remove the inline qualifier from
>> all but the few truly single-line functions.
>>
>
> The compiler is free to ignore inline hints... It probably would make
> single-line functions inline anyway.
>
Yes. I think of it more as a note to the reader: "don't add stuff to
this function. It is meant to be short and simple".
--
Gilad Ben-Yossef
Chief Coffee Drinker
"If you take a class in large-scale robotics, can you end up in a
situation where the homework eats your dog?"
-- Jean-Baptiste Queru
* Re: [PATCH 01/10] staging: ccree: remove inline qualifiers
2017-12-07 7:00 ` Gilad Ben-Yossef
@ 2017-12-07 8:28 ` Dan Carpenter
0 siblings, 0 replies; 16+ messages in thread
From: Dan Carpenter @ 2017-12-07 8:28 UTC
To: Gilad Ben-Yossef
Cc: Greg Kroah-Hartman, Linux Crypto Mailing List, devel,
driverdev-devel, Linux kernel mailing list, Ofir Drang
On Thu, Dec 07, 2017 at 09:00:11AM +0200, Gilad Ben-Yossef wrote:
> On Mon, Dec 4, 2017 at 11:36 AM, Dan Carpenter <dan.carpenter@oracle.com> wrote:
> > On Sun, Dec 03, 2017 at 01:58:12PM +0000, Gilad Ben-Yossef wrote:
> >> The ccree driver was marking a lot of big functions in C files as
> >> static inline for no good reason. Remove the inline qualifier from
> >> all but the few truly single-line functions.
> >>
> >
> > The compiler is free to ignore inline hints... It probably would make
> > single-line functions inline anyway.
> >
>
> Yes. I think of it more as a note to the reader: "don't add stuff to
> this function. It is meant to be short and simple".
>
Ah. Fine.
regards,
dan carpenter
* [PATCH 02/10] staging: ccree: remove unproven likely/unlikely
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 01/10] staging: ccree: remove inline qualifiers Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 03/10] staging: ccree: remove more unnecessary parentheses Gilad Ben-Yossef
` (8 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC
To: Greg Kroah-Hartman
Cc: Ofir Drang, linux-kernel, linux-crypto, driverdev-devel, devel
The ccree code made heavy use of likely/unlikely annotations without
any measurements proving their benefit. Remove them all until we can
see which ones are justified and which are not.
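For context, a simplified sketch of what these annotations expand to
in include/linux/compiler.h (send_and_check() below is a hypothetical
caller, not part of this patch); they only bias the compiler's branch
layout, so removing them changes no behaviour:

	/* include/linux/compiler.h, simplified */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* so a caller can mark an error path as cold: */
	static int send_and_check(int rc)
	{
		if (unlikely(rc))	/* error path laid out off the hot path */
			return rc;
		return 0;
	}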
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_aead.c | 57 ++++++------
drivers/staging/ccree/ssi_buffer_mgr.c | 154 +++++++++++++++-----------------
drivers/staging/ccree/ssi_cipher.c | 54 +++++------
drivers/staging/ccree/ssi_driver.c | 34 +++----
drivers/staging/ccree/ssi_hash.c | 103 ++++++++++-----------
drivers/staging/ccree/ssi_ivgen.c | 2 +-
drivers/staging/ccree/ssi_request_mgr.c | 28 +++---
drivers/staging/ccree/ssi_sram_mgr.c | 4 +-
8 files changed, 209 insertions(+), 227 deletions(-)
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 56a4059..0b085dc 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -251,7 +251,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req)
err = -EBADMSG;
}
} else { /*ENCRYPT*/
- if (unlikely(areq_ctx->is_icv_fragmented)) {
+ if (areq_ctx->is_icv_fragmented) {
u32 skip = areq->cryptlen + areq_ctx->dst_offset;
cc_copy_sg_portion(dev, areq_ctx->mac_buf,
@@ -412,7 +412,7 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
return -EINVAL;
}
/* Check cipher key size */
- if (unlikely(ctx->flow_mode == S_DIN_to_DES)) {
+ if (ctx->flow_mode == S_DIN_to_DES) {
if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
ctx->enc_keylen);
@@ -465,10 +465,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
hashmode = DRV_HASH_HW_SHA256;
}
- if (likely(keylen != 0)) {
+ if (keylen != 0) {
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, key_dma_addr))) {
+ if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -547,10 +547,10 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc))
+ if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
- if (likely(key_dma_addr))
+ if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
return rc;
@@ -607,7 +607,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
rc = validate_keys_sizes(ctx);
- if (unlikely(rc))
+ if (rc)
goto badkey;
/* STAT_PHASE_1: Copy key to ctx */
@@ -646,7 +646,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
}
@@ -818,7 +818,7 @@ ssi_aead_process_authenc_data_desc(
ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
mlli_addr = areq_ctx->dst.sram_addr;
mlli_nents = areq_ctx->dst.mlli_nents;
@@ -1202,10 +1202,9 @@ static void ssi_aead_load_mlli_to_sram(
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (unlikely(
- req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
- req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass)) {
+ if (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
+ req_ctx->data_buff_type == SSI_DMA_BUF_MLLI ||
+ !req_ctx->is_single_pass) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1231,17 +1230,17 @@ static enum cc_flow_mode ssi_aead_get_data_flow_mode(
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
AES_to_HASH_and_DOUT : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
DES_to_HASH_and_DOUT : DIN_DES_DOUT;
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
AES_and_HASH : DIN_AES_DOUT;
else
- data_flow_mode = likely(is_single_pass) ?
+ data_flow_mode = is_single_pass ?
DES_and_HASH : DIN_DES_DOUT;
}
@@ -1367,16 +1366,16 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
- if (unlikely(direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
- req->cryptlen < ctx->authsize))
+ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
+ req->cryptlen < ctx->authsize)
goto data_size_err;
areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
switch (ctx->flow_mode) {
case S_DIN_to_AES:
- if (unlikely(ctx->cipher_mode == DRV_CIPHER_CBC &&
- !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)))
+ if (ctx->cipher_mode == DRV_CIPHER_CBC &&
+ !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
goto data_size_err;
if (ctx->cipher_mode == DRV_CIPHER_CCM)
break;
@@ -1395,9 +1394,9 @@ static int validate_data_size(struct ssi_aead_ctx *ctx,
break;
case S_DIN_to_DES:
- if (unlikely(!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
goto data_size_err;
- if (unlikely(!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)))
+ if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
areq_ctx->is_single_pass = false;
break;
default:
@@ -2024,7 +2023,7 @@ static int ssi_aead_process(struct aead_request *req,
/* STAT_PHASE_0: Init and sanity checks */
/* Check data length according to mode */
- if (unlikely(validate_data_size(ctx, direct, req))) {
+ if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
req->cryptlen, req->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
@@ -2073,7 +2072,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM) {
rc = config_ccm_adata(req);
- if (unlikely(rc)) {
+ if (rc) {
dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
rc);
goto exit;
@@ -2088,7 +2087,7 @@ static int ssi_aead_process(struct aead_request *req,
#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
rc = config_gcm_context(req);
- if (unlikely(rc)) {
+ if (rc) {
dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
rc);
goto exit;
@@ -2097,7 +2096,7 @@ static int ssi_aead_process(struct aead_request *req,
#endif /*SSI_CC_HAS_AES_GCM*/
rc = cc_map_aead_request(ctx->drvdata, req);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit;
}
@@ -2173,7 +2172,7 @@ static int ssi_aead_process(struct aead_request *req,
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_aead_request(dev, req);
}
@@ -2829,7 +2828,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
}
t_alg->drvdata = drvdata;
rc = crypto_register_aead(&t_alg->aead_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->aead_alg.base.cra_driver_name);
goto fail2;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index dc45333..7ceee91 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -247,7 +247,7 @@ static int cc_generate_mlli(
mlli_params->mlli_virt_addr = dma_pool_alloc(
mlli_params->curr_pool, GFP_KERNEL,
&mlli_params->mlli_dma_addr);
- if (unlikely(!mlli_params->mlli_virt_addr)) {
+ if (!mlli_params->mlli_virt_addr) {
dev_err(dev, "dma_pool_alloc() failed\n");
rc = -ENOMEM;
goto build_mlli_exit;
@@ -350,7 +350,7 @@ cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
- if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
@@ -379,7 +379,7 @@ static int cc_map_sg(
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
- if (unlikely(dma_map_sg(dev, sg, 1, direction) != 1)) {
+ if (dma_map_sg(dev, sg, 1, direction) != 1) {
dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
@@ -403,7 +403,7 @@ static int cc_map_sg(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (unlikely(*mapped_nents == 0)) {
+ if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -414,7 +414,7 @@ static int cc_map_sg(
*/
*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
direction);
- if (unlikely(*mapped_nents != *nents)) {
+ if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -436,8 +436,7 @@ ssi_aead_handle_config_buf(struct device *dev,
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
- if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() config buffer failed\n");
return -ENOMEM;
}
@@ -463,8 +462,7 @@ static int ssi_ahash_handle_curr_buf(struct device *dev,
dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
- if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
- DMA_TO_DEVICE) != 1)) {
+ if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
dev_err(dev, "dma_map_sg() src buffer failed\n");
return -ENOMEM;
}
@@ -490,7 +488,7 @@ void cc_unmap_blkcipher_request(
{
struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
- if (likely(req_ctx->gen_ctx.iv_dma_addr)) {
+ if (req_ctx->gen_ctx.iv_dma_addr) {
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
@@ -537,15 +535,14 @@ int cc_map_blkcipher_request(
sg_data.num_of_buffers = 0;
/* Map IV buffer */
- if (likely(ivsize)) {
+ if (ivsize) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- req_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
return -ENOMEM;
@@ -559,16 +556,16 @@ int cc_map_blkcipher_request(
/* Map the src SGL */
rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
- if (unlikely(rc)) {
+ if (rc) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
- if (unlikely(src == dst)) {
+ if (src == dst) {
/* Handle inplace operation */
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
req_ctx->out_nents = 0;
cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
nbytes, 0, true,
@@ -576,17 +573,16 @@ int cc_map_blkcipher_request(
}
} else {
/* Map the dst sg */
- if (unlikely(cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
- &req_ctx->out_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents))) {
+ if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+ &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
rc = -ENOMEM;
goto ablkcipher_exit;
}
if (mapped_nents > 1)
req_ctx->dma_buf_type = SSI_DMA_BUF_MLLI;
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
nbytes, 0, true,
&req_ctx->in_mlli_nents);
@@ -596,10 +592,10 @@ int cc_map_blkcipher_request(
}
}
- if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc))
+ if (rc)
goto ablkcipher_exit;
}
@@ -690,7 +686,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
cc_get_sgl_nents(dev, req->src, size_to_unmap,
&dummy, &chained),
DMA_BIDIRECTIONAL);
- if (unlikely(req->src != req->dst)) {
+ if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
@@ -700,7 +696,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
- likely(req->src == req->dst)) {
+ req->src == req->dst) {
/* copy back mac from temporary location to deal with possible
* data memory overriding that caused by cache coherence
* problem.
@@ -774,7 +770,7 @@ static int cc_aead_chain_iv(
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
- if (unlikely(!req->iv)) {
+ if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
goto chain_iv_exit;
}
@@ -782,7 +778,7 @@ static int cc_aead_chain_iv(
areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
hw_iv_size,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr))) {
+ if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
rc = -ENOMEM;
@@ -831,7 +827,7 @@ static int cc_aead_chain_assoc(
goto chain_assoc_exit;
}
- if (unlikely(req->assoclen == 0)) {
+ if (req->assoclen == 0) {
areq_ctx->assoc_buff_type = SSI_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -861,7 +857,7 @@ static int cc_aead_chain_assoc(
mapped_nents++;
}
}
- if (unlikely(mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
return -ENOMEM;
@@ -872,8 +868,7 @@ static int cc_aead_chain_assoc(
* ccm header configurations
*/
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
- if (unlikely((mapped_nents + 1) >
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
+ if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
(areq_ctx->assoc.nents + 1),
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -882,14 +877,12 @@ static int cc_aead_chain_assoc(
}
}
- if (likely(mapped_nents == 1) &&
- areq_ctx->ccm_hdr_size == ccm_header_size_null)
+ if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
areq_ctx->assoc_buff_type = SSI_DMA_BUF_DLLI;
else
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
- if (unlikely((do_chain) ||
- areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI)) {
+ if ((do_chain) || areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
@@ -912,7 +905,7 @@ static void cc_prepare_aead_data_dlli(
unsigned int authsize = areq_ctx->req_authsize;
areq_ctx->is_icv_fragmented = false;
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
areq_ctx->src_sgl) +
@@ -952,7 +945,7 @@ static int cc_prepare_aead_data_mlli(
int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
- if (likely(req->src == req->dst)) {
+ if (req->src == req->dst) {
/*INPLACE*/
cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
areq_ctx->src_sgl, areq_ctx->cryptlen,
@@ -963,12 +956,12 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->src.nents,
authsize, *src_last_bytes,
&areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (unlikely(areq_ctx->is_icv_fragmented)) {
+ if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to
* simplify MAC verification upon request completion
@@ -1013,7 +1006,7 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->src.nents,
authsize, *src_last_bytes,
&areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
@@ -1022,7 +1015,7 @@ static int cc_prepare_aead_data_mlli(
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
- if (unlikely(areq_ctx->is_icv_fragmented)) {
+ if (areq_ctx->is_icv_fragmented) {
cc_copy_mac(dev, req, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
@@ -1051,12 +1044,12 @@ static int cc_prepare_aead_data_mlli(
areq_ctx->dst.nents,
authsize, *dst_last_bytes,
&areq_ctx->is_icv_fragmented);
- if (unlikely(icv_nents < 0)) {
+ if (icv_nents < 0) {
rc = -ENOTSUPP;
goto prepare_data_mlli_exit;
}
- if (likely(!areq_ctx->is_icv_fragmented)) {
+ if (!areq_ctx->is_icv_fragmented) {
/* Contig. ICV */
areq_ctx->icv_dma_addr = sg_dma_address(
&areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]) +
@@ -1127,7 +1120,7 @@ static int cc_aead_chain_data(
sg_index += areq_ctx->src_sgl->length;
src_mapped_nents--;
}
- if (unlikely(src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1148,7 +1141,7 @@ static int cc_aead_chain_data(
&areq_ctx->dst.nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
&dst_mapped_nents);
- if (unlikely(rc)) {
+ if (rc) {
rc = -ENOMEM;
goto chain_data_exit;
}
@@ -1171,7 +1164,7 @@ static int cc_aead_chain_data(
sg_index += areq_ctx->dst_sgl->length;
dst_mapped_nents--;
}
- if (unlikely(dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES)) {
+ if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
return -ENOMEM;
@@ -1271,7 +1264,7 @@ int cc_map_aead_request(
*/
if (drvdata->coherent &&
areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
- likely(req->src == req->dst))
+ req->src == req->dst)
cc_copy_mac(dev, req, SSI_SG_TO_BUF);
/* cacluate the size for cipher remove ICV in decrypt*/
@@ -1282,7 +1275,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
MAX_MAC_SIZE, areq_ctx->mac_buf);
rc = -ENOMEM;
@@ -1296,7 +1289,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, addr);
areq_ctx->ccm_iv0_dma_addr = 0;
@@ -1317,7 +1310,7 @@ int cc_map_aead_request(
if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, areq_ctx->hkey);
rc = -ENOMEM;
@@ -1327,7 +1320,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
rc = -ENOMEM;
@@ -1338,7 +1331,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
areq_ctx->gcm_iv_inc1_dma_addr = 0;
@@ -1350,7 +1343,7 @@ int cc_map_aead_request(
dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
AES_BLOCK_SIZE, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
areq_ctx->gcm_iv_inc2_dma_addr = 0;
@@ -1372,12 +1365,12 @@ int cc_map_aead_request(
(LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
LLI_MAX_NUM_OF_DATA_ENTRIES),
&dummy, &mapped_nents);
- if (unlikely(rc)) {
+ if (rc) {
rc = -ENOMEM;
goto aead_map_failure;
}
- if (likely(areq_ctx->is_single_pass)) {
+ if (areq_ctx->is_single_pass) {
/*
* Create MLLI table for:
* (1) Assoc. data
@@ -1385,13 +1378,13 @@ int cc_map_aead_request(
* Note: IV is contg. buffer (not an SGL)
*/
rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
} else { /* DOUBLE-PASS flow */
/*
@@ -1415,25 +1408,24 @@ int cc_map_aead_request(
* (4) MLLI for dst
*/
rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
}
/* Mlli support -start building the MLLI according to the above
* results
*/
- if (unlikely(
- areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
- areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
+ areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = cc_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc))
+ if (rc)
goto aead_map_failure;
cc_update_aead_mlli_nents(drvdata, req);
@@ -1473,7 +1465,7 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(nbytes == 0 && *curr_buff_cnt == 0)) {
+ if (nbytes == 0 && *curr_buff_cnt == 0) {
/* nothing to do */
return 0;
}
@@ -1488,10 +1480,9 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
}
if (src && nbytes > 0 && do_update) {
- if (unlikely(cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
- &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES,
- &dummy, &mapped_nents))) {
+ if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+ &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+ &dummy, &mapped_nents)) {
goto unmap_curr_buff;
}
if (src && mapped_nents == 1 &&
@@ -1507,12 +1498,12 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
}
/*build mlli */
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
0, true, &areq_ctx->mlli_nents);
- if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params)))
+ if (cc_generate_mlli(dev, &sg_data, mlli_params))
goto fail_unmap_din;
}
/* change the buffer index for the unmap function */
@@ -1563,7 +1554,7 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
sg_data.num_of_buffers = 0;
areq_ctx->in_nents = 0;
- if (unlikely(total_in_len < block_size)) {
+ if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
@@ -1604,11 +1595,10 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
}
if (update_data_len > *curr_buff_cnt) {
- if (unlikely(cc_map_sg(dev, src,
- (update_data_len - *curr_buff_cnt),
- DMA_TO_DEVICE, &areq_ctx->in_nents,
- LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
- &mapped_nents))) {
+ if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+ DMA_TO_DEVICE, &areq_ctx->in_nents,
+ LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+ &mapped_nents)) {
goto unmap_curr_buff;
}
if (mapped_nents == 1 &&
@@ -1624,13 +1614,13 @@ int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
}
}
- if (unlikely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI)) {
+ if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_MLLI) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
/* add the src data to the sg_data */
cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
(update_data_len - *curr_buff_cnt), 0, true,
&areq_ctx->mlli_nents);
- if (unlikely(cc_generate_mlli(dev, &sg_data, mlli_params)))
+ if (cc_generate_mlli(dev, &sg_data, mlli_params))
goto fail_unmap_din;
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
@@ -1666,7 +1656,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
areq_ctx->mlli_params.mlli_dma_addr);
}
- if ((src) && likely(areq_ctx->in_nents)) {
+ if ((src) && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
@@ -1707,7 +1697,7 @@ int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
LLI_ENTRY_BYTE_SIZE,
MLLI_TABLE_MIN_ALIGNMENT, 0);
- if (unlikely(!buff_mgr_handle->mlli_buffs_pool))
+ if (!buff_mgr_handle->mlli_buffs_pool)
goto error;
return 0;
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 6c4b93d..9019615 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -76,30 +76,30 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size)
switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
- if (likely(ctx_p->cipher_mode != DRV_CIPHER_XTS &&
- ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
- ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER))
+ if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+ ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+ ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
return 0;
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
case (CC_AES_192_BIT_KEY_SIZE * 2):
case (CC_AES_256_BIT_KEY_SIZE * 2):
- if (likely(ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER))
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
return 0;
break;
default:
break;
}
case S_DIN_to_DES:
- if (likely(size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE))
+ if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
- if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
+ if (size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE)
return 0;
break;
#endif
@@ -122,7 +122,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
return 0;
break;
case DRV_CIPHER_CBC_CTS:
- if (likely(size >= AES_BLOCK_SIZE))
+ if (size >= AES_BLOCK_SIZE)
return 0;
break;
case DRV_CIPHER_OFB:
@@ -132,7 +132,7 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
case DRV_CIPHER_CBC:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
+ if (IS_ALIGNED(size, AES_BLOCK_SIZE))
return 0;
break;
default:
@@ -140,14 +140,14 @@ static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p,
}
break;
case S_DIN_to_DES:
- if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
+ if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
#if SSI_CC_HAS_MULTI2
case S_DIN_to_MULTI2:
switch (ctx_p->cipher_mode) {
case DRV_MULTI2_CBC:
- if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
+ if (IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE))
return 0;
break;
case DRV_MULTI2_OFB:
@@ -272,10 +272,10 @@ static int ssi_verify_3des_keys(const u8 *key, unsigned int keylen)
struct tdes_keys *tdes_key = (struct tdes_keys *)key;
/* verify key1 != key2 and key3 != key2*/
- if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
- sizeof(tdes_key->key1)) == 0) ||
- (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
- sizeof(tdes_key->key3)) == 0))) {
+ if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key1)) == 0) ||
+ (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+ sizeof(tdes_key->key3)) == 0)) {
return -ENOEXEC;
}
@@ -320,7 +320,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
keylen -= 1;
#endif /*SSI_CC_HAS_MULTI2*/
- if (unlikely(validate_keys_sizes(ctx_p, keylen))) {
+ if (validate_keys_sizes(ctx_p, keylen)) {
dev_err(dev, "Unsupported key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
@@ -330,13 +330,13 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
/* setting HW key slots */
struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
- if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
+ if (ctx_p->flow_mode != S_DIN_to_AES) {
dev_err(dev, "HW key not supported for non-AES flows\n");
return -EINVAL;
}
ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
- if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key1 number (%d)\n",
hki->hw_key1);
return -EINVAL;
@@ -345,14 +345,14 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (unlikely(hki->hw_key1 == hki->hw_key2)) {
+ if (hki->hw_key1 == hki->hw_key2) {
dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
hki->hw_key1, hki->hw_key2);
return -EINVAL;
}
ctx_p->hw.key2_slot =
hw_key_to_cc_hw_key(hki->hw_key2);
- if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
dev_err(dev, "Unsupported hw key2 number (%d)\n",
hki->hw_key2);
return -EINVAL;
@@ -367,7 +367,7 @@ static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
// verify weak keys
if (ctx_p->flow_mode == S_DIN_to_DES) {
- if (unlikely(!des_ekey(tmp, key)) &&
+ if (!des_ekey(tmp, key) &&
(crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
dev_dbg(dev, "weak DES key");
@@ -637,7 +637,7 @@ ssi_blkcipher_create_data_desc(
return;
}
/* Process */
- if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI) {
dev_dbg(dev, " data params addr %pad length 0x%X\n",
&sg_dma_address(src), nbytes);
dev_dbg(dev, " data params addr %pad length 0x%X\n",
@@ -760,7 +760,7 @@ static int ssi_blkcipher_process(
/* STAT_PHASE_0: Init and sanity checks */
/* TODO: check data length according to mode */
- if (unlikely(validate_data_size(ctx_p, nbytes))) {
+ if (validate_data_size(ctx_p, nbytes)) {
dev_err(dev, "Unsupported data size %d.\n", nbytes);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
rc = -EINVAL;
@@ -806,7 +806,7 @@ static int ssi_blkcipher_process(
rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
req_ctx->iv, src, dst);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "map_request() failed\n");
goto exit_process;
}
@@ -839,7 +839,7 @@ static int ssi_blkcipher_process(
rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len,
(!areq) ? 0 : 1);
if (areq) {
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
/* Failed to send the request or request completed
* synchronously
*/
@@ -1364,7 +1364,7 @@ int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
rc = crypto_register_alg(&t_alg->crypto_alg);
dev_dbg(dev, "%s alg registration rc = %x\n",
t_alg->crypto_alg.cra_driver_name, rc);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 3cb2296..6282c37 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -100,7 +100,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
/* read the interrupt status */
irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
dev_dbg(dev, "Got IRR=0x%08X\n", irr);
- if (unlikely(irr == 0)) { /* Probably shared interrupt line */
+ if (irr == 0) { /* Probably shared interrupt line */
dev_err(dev, "Got interrupt with empty IRR\n");
return IRQ_NONE;
}
@@ -111,7 +111,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (likely((irr & SSI_COMP_IRQ_MASK))) {
+ if ((irr & SSI_COMP_IRQ_MASK)) {
/* Mask AXI completion interrupt - will be unmasked in
* Deferred service handler
*/
@@ -121,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#ifdef CC_SUPPORT_FIPS
/* TEE FIPS interrupt */
- if (likely((irr & SSI_GPR0_IRQ_MASK))) {
+ if ((irr & SSI_GPR0_IRQ_MASK)) {
/* Mask interrupt - will be unmasked in Deferred service
* handler
*/
@@ -131,7 +131,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#endif
/* AXI error interrupt */
- if (unlikely((irr & SSI_AXI_ERR_IRQ_MASK))) {
+ if ((irr & SSI_AXI_ERR_IRQ_MASK)) {
u32 axi_err;
/* Read the AXI error ID */
@@ -142,7 +142,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
irr &= ~SSI_AXI_ERR_IRQ_MASK;
}
- if (unlikely(irr)) {
+ if (irr) {
dev_dbg(dev, "IRR includes unknown cause bits (0x%08X)\n",
irr);
/* Just warning */
@@ -295,78 +295,78 @@ static int init_cc_resources(struct platform_device *plat_dev)
DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "init_cc_regs failed\n");
goto post_clk_err;
}
#ifdef ENABLE_CC_SYSFS
rc = ssi_sysfs_init(&dev->kobj, new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "init_stat_db failed\n");
goto post_regs_err;
}
#endif
rc = ssi_fips_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "SSI_FIPS_INIT failed 0x%x\n", rc);
goto post_sysfs_err;
}
rc = ssi_sram_mgr_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_sram_mgr_init failed\n");
goto post_fips_init_err;
}
new_drvdata->mlli_sram_addr =
cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
- if (unlikely(new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR)) {
+ if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
rc = -ENOMEM;
goto post_sram_mgr_err;
}
rc = request_mgr_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "request_mgr_init failed\n");
goto post_sram_mgr_err;
}
rc = cc_buffer_mgr_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "buffer_mgr_init failed\n");
goto post_req_mgr_err;
}
rc = cc_pm_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_power_mgr_init failed\n");
goto post_buf_mgr_err;
}
rc = ssi_ivgen_init(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_ivgen_init failed\n");
goto post_power_mgr_err;
}
/* Allocate crypto algs */
rc = ssi_ablkcipher_alloc(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_ablkcipher_alloc failed\n");
goto post_ivgen_err;
}
/* hash must be allocated before aead since hash exports APIs */
rc = ssi_hash_alloc(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_hash_alloc failed\n");
goto post_cipher_err;
}
rc = ssi_aead_alloc(new_drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "ssi_aead_alloc failed\n");
goto post_hash_err;
}
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index c955e50..a33697b 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -116,9 +116,8 @@ static void ssi_hash_create_data_desc(
static void ssi_set_hash_endianity(u32 mode, struct cc_hw_desc *desc)
{
- if (unlikely(mode == DRV_HASH_MD5 ||
- mode == DRV_HASH_SHA384 ||
- mode == DRV_HASH_SHA512)) {
+ if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
+ mode == DRV_HASH_SHA512) {
set_bytes_swap(desc, 1);
} else {
set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
@@ -133,7 +132,7 @@ static int ssi_hash_map_result(struct device *dev,
dma_map_single(dev, (void *)state->digest_result_buff,
digestsize,
DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, state->digest_result_dma_addr))) {
+ if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
digestsize);
return -ENOMEM;
@@ -219,8 +218,8 @@ static int ssi_hash_map_request(struct device *dev,
memcpy(state->digest_buff, ctx->digest_buff,
ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
- if (unlikely(ctx->hash_mode == DRV_HASH_SHA512 ||
- ctx->hash_mode == DRV_HASH_SHA384))
+ if (ctx->hash_mode == DRV_HASH_SHA512 ||
+ ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
digest_len_sha512_init, HASH_LEN_SIZE);
else
@@ -254,7 +253,7 @@ static int ssi_hash_map_request(struct device *dev,
set_flow_mode(&desc, BYPASS);
rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto fail4;
}
@@ -446,18 +445,17 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
+ if (ssi_hash_map_request(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state,
- src, nbytes, 1))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -494,7 +492,7 @@ static int ssi_hash_digest(struct ahash_req_ctx *state,
NS_BIT);
} else {
set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- if (likely(nbytes))
+ if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
set_cipher_do(&desc[idx], DO_PAD);
@@ -576,7 +574,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -619,7 +617,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
block_size);
- if (unlikely(rc)) {
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
nbytes);
@@ -677,7 +675,7 @@ static int ssi_hash_update(struct ahash_req_ctx *state,
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
}
@@ -711,12 +709,11 @@ static int ssi_hash_finup(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
- nbytes, 1))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -809,7 +806,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -847,13 +844,12 @@ static int ssi_hash_final(struct ahash_req_ctx *state,
dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
nbytes);
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, src,
- nbytes, 0))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -955,7 +951,7 @@ ctx->drvdata, ctx->hash_mode), HASH_LEN_SIZE);
if (async_req) {
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
ssi_hash_unmap_result(dev, state, digestsize, result);
@@ -1019,8 +1015,7 @@ static int ssi_ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.key_dma_addr = dma_map_single(
dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev,
- ctx->key_params.key_dma_addr))) {
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1105,7 +1100,7 @@ static int ssi_ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
}
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
}
@@ -1201,7 +1196,7 @@ static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.key_dma_addr = dma_map_single(
dev, (void *)key,
keylen, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, ctx->key_params.key_dma_addr))) {
+ if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
return -ENOMEM;
@@ -1415,7 +1410,7 @@ static int ssi_mac_update(struct ahash_request *req)
rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
req->nbytes, block_size);
- if (unlikely(rc)) {
+ if (rc) {
if (rc == 1) {
dev_dbg(dev, " data size not require HW update %x\n",
req->nbytes);
@@ -1448,7 +1443,7 @@ static int ssi_mac_update(struct ahash_request *req)
ssi_req.user_arg = (void *)req;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
}
@@ -1482,13 +1477,13 @@ static int ssi_mac_final(struct ahash_request *req)
dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
- req->nbytes, 0))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 0)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1562,7 +1557,7 @@ static int ssi_mac_final(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -1589,12 +1584,12 @@ static int ssi_mac_finup(struct ahash_request *req)
return ssi_mac_final(req);
}
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
- req->nbytes, 1))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
@@ -1635,7 +1630,7 @@ static int ssi_mac_finup(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -1658,17 +1653,17 @@ static int ssi_mac_digest(struct ahash_request *req)
dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
- if (unlikely(ssi_hash_map_request(dev, state, ctx))) {
+ if (ssi_hash_map_request(dev, state, ctx)) {
dev_err(dev, "map_ahash_source() failed\n");
return -ENOMEM;
}
- if (unlikely(ssi_hash_map_result(dev, state, digestsize))) {
+ if (ssi_hash_map_result(dev, state, digestsize)) {
dev_err(dev, "map_ahash_digest() failed\n");
return -ENOMEM;
}
- if (unlikely(cc_map_hash_request_final(ctx->drvdata, state, req->src,
- req->nbytes, 1))) {
+ if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
+ req->nbytes, 1)) {
dev_err(dev, "map_ahash_request_final() failed\n");
return -ENOMEM;
}
@@ -1709,7 +1704,7 @@ static int ssi_mac_digest(struct ahash_request *req)
idx++;
rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
- if (unlikely(rc != -EINPROGRESS)) {
+ if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
ssi_hash_unmap_result(dev, state, digestsize, req->result);
@@ -2153,7 +2148,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(digest_len_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_init);
@@ -2165,7 +2160,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(digest_len_sha512_init),
larval_seq, &larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(digest_len_sha512_init);
@@ -2180,7 +2175,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(md5_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(md5_init);
larval_seq_len = 0;
@@ -2189,7 +2184,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha1_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha1_init);
larval_seq_len = 0;
@@ -2198,7 +2193,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha224_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha224_init);
larval_seq_len = 0;
@@ -2207,7 +2202,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
ARRAY_SIZE(sha256_init), larval_seq,
&larval_seq_len);
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc))
+ if (rc)
goto init_digest_const_err;
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
@@ -2228,7 +2223,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
@@ -2246,7 +2241,7 @@ int ssi_hash_init_sram_digest_consts(struct ssi_drvdata *drvdata)
sram_buff_ofs += sizeof(u32);
}
rc = send_request_init(drvdata, larval_seq, larval_seq_len);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "send_request() failed (rc = %d)\n", rc);
goto init_digest_const_err;
}
@@ -2295,7 +2290,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
/*must be set before the alg registration as it is being used there*/
rc = ssi_hash_init_sram_digest_consts(drvdata);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
goto fail;
}
@@ -2316,7 +2311,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2341,7 +2336,7 @@ int ssi_hash_alloc(struct ssi_drvdata *drvdata)
t_alg->drvdata = drvdata;
rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "%s alg registration failed\n",
driver_hash[alg].driver_name);
kfree(t_alg);
@@ -2480,7 +2475,7 @@ static void ssi_hash_create_data_desc(struct ahash_req_ctx *areq_ctx,
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (likely(areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI)) {
+ if (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_DLLI) {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
sg_dma_address(areq_ctx->curr_sg),
diff --git a/drivers/staging/ccree/ssi_ivgen.c b/drivers/staging/ccree/ssi_ivgen.c
index 4ca6ca7..febee22 100644
--- a/drivers/staging/ccree/ssi_ivgen.c
+++ b/drivers/staging/ccree/ssi_ivgen.c
@@ -143,7 +143,7 @@ int ssi_ivgen_init_sram_pool(struct ssi_drvdata *drvdata)
/* Generate initial pool */
rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
- if (unlikely(rc))
+ if (rc)
return rc;
/* Fire-and-forget */
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 9883d14..7a653f6 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -115,7 +115,7 @@ int request_mgr_init(struct ssi_drvdata *drvdata)
#ifdef COMP_IN_WQ
dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
- if (unlikely(!req_mgr_h->workq)) {
+ if (!req_mgr_h->workq) {
dev_err(dev, "Failed creating work queue\n");
rc = -ENOMEM;
goto req_mgr_init_err;
@@ -214,27 +214,25 @@ static int request_mgr_queues_status_check(
* be chaned during the poll because the spinlock_bh
* is held by the thread
*/
- if (unlikely(((req_mgr_h->req_queue_head + 1) &
- (MAX_REQUEST_QUEUE_SIZE - 1)) ==
- req_mgr_h->req_queue_tail)) {
+ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
+ req_mgr_h->req_queue_tail) {
dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
return -EBUSY;
}
- if ((likely(req_mgr_h->q_free_slots >= total_seq_len)))
+ if ((req_mgr_h->q_free_slots >= total_seq_len))
return 0;
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- if (unlikely(req_mgr_h->q_free_slots <
- req_mgr_h->min_free_hw_slots)) {
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) {
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
}
- if (likely(req_mgr_h->q_free_slots >= total_seq_len)) {
+ if (req_mgr_h->q_free_slots >= total_seq_len) {
/* If there is enough place return */
return 0;
}
@@ -296,7 +294,7 @@ int send_request(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
max_required_seq_len);
- if (likely(rc == 0))
+ if (rc == 0)
/* There is enough place in the queue */
break;
/* something wrong release the spinlock*/
@@ -340,7 +338,7 @@ int send_request(
ssi_req->ivgen_dma_addr_len,
ssi_req->ivgen_size, iv_seq, &iv_seq_len);
- if (unlikely(rc)) {
+ if (rc) {
dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
spin_unlock_bh(&req_mgr_h->hw_lock);
#if defined(CONFIG_PM)
@@ -355,7 +353,7 @@ int send_request(
used_sw_slots = ((req_mgr_h->req_queue_head -
req_mgr_h->req_queue_tail) &
(MAX_REQUEST_QUEUE_SIZE - 1));
- if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
+ if (used_sw_slots > req_mgr_h->max_used_sw_slots)
req_mgr_h->max_used_sw_slots = used_sw_slots;
/* Enqueue request - must be locked with HW lock*/
@@ -381,7 +379,7 @@ int send_request(
enqueue_seq(cc_base, desc, len);
enqueue_seq(cc_base, &req_mgr_h->compl_desc, (is_dout ? 0 : 1));
- if (unlikely(req_mgr_h->q_free_slots < total_seq_len)) {
+ if (req_mgr_h->q_free_slots < total_seq_len) {
/* This situation should never occur. Maybe indicating problem
* with resuming power. Set the free slot count to 0 and hope
* for the best.
@@ -429,7 +427,7 @@ int send_request_init(
*/
rc = request_mgr_queues_status_check(drvdata, req_mgr_h,
total_seq_len);
- if (unlikely(rc))
+ if (rc)
return rc;
set_queue_last_ind(&desc[(len - 1)]);
@@ -489,7 +487,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
request_mgr_handle->axi_completed--;
/* Dequeue request */
- if (unlikely(*head == *tail)) {
+ if (*head == *tail) {
/* We are supposed to handle a completion but our
* queue is empty. This is not normal. Return and
* hope for the best.
@@ -518,7 +516,7 @@ static void proc_completions(struct ssi_drvdata *drvdata)
}
#endif /* COMPLETION_DELAY */
- if (likely(ssi_req->user_cb))
+ if (ssi_req->user_cb)
ssi_req->user_cb(dev, ssi_req->user_arg);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
diff --git a/drivers/staging/ccree/ssi_sram_mgr.c b/drivers/staging/ccree/ssi_sram_mgr.c
index b71460c..0704031 100644
--- a/drivers/staging/ccree/ssi_sram_mgr.c
+++ b/drivers/staging/ccree/ssi_sram_mgr.c
@@ -75,12 +75,12 @@ ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
struct device *dev = drvdata_to_dev(drvdata);
ssi_sram_addr_t p;
- if (unlikely((size & 0x3))) {
+ if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
size);
return NULL_SRAM_ADDR;
}
- if (unlikely(size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset))) {
+ if (size > (SSI_CC_SRAM_SIZE - smgr_ctx->sram_free_offset)) {
dev_err(dev, "Not enough space to allocate %u B (at offset %llu)\n",
size, smgr_ctx->sram_free_offset);
return NULL_SRAM_ADDR;
--
2.7.4
* [PATCH 03/10] staging: ccree: remove more unnecessary parentheses
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 01/10] staging: ccree: remove inline qualifiers Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 02/10] staging: ccree: remove unproven likely/unlikely Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 04/10] staging: ccree: fix indentation of var assignment Gilad Ben-Yossef
` (7 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
The removal of likely/unlikely unearthed some more
unnecessary parentheses. Remove them for better readability.
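For example, following one of the conditions fixed below through its
history (a sketch; the intermediate stage is what patch 02 left behind):

    if (unlikely((irr & SSI_COMP_IRQ_MASK)))   /* before patch 02 */
    if ((irr & SSI_COMP_IRQ_MASK))             /* after patch 02 */
    if (irr & SSI_COMP_IRQ_MASK)               /* after this patch */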
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_buffer_mgr.c | 4 ++--
drivers/staging/ccree/ssi_driver.c | 6 +++---
drivers/staging/ccree/ssi_request_mgr.c | 2 +-
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 7ceee91..bac9d12 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -882,7 +882,7 @@ static int cc_aead_chain_assoc(
else
areq_ctx->assoc_buff_type = SSI_DMA_BUF_MLLI;
- if ((do_chain) || areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
+ if (do_chain || areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) {
dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
@@ -1656,7 +1656,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
areq_ctx->mlli_params.mlli_dma_addr);
}
- if ((src) && areq_ctx->in_nents) {
+ if (src && areq_ctx->in_nents) {
dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
dma_unmap_sg(dev, src,
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 6282c37..0b2593f 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -111,7 +111,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if ((irr & SSI_COMP_IRQ_MASK)) {
+ if (irr & SSI_COMP_IRQ_MASK) {
/* Mask AXI completion interrupt - will be unmasked in
* Deferred service handler
*/
@@ -121,7 +121,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#ifdef CC_SUPPORT_FIPS
/* TEE FIPS interrupt */
- if ((irr & SSI_GPR0_IRQ_MASK)) {
+ if (irr & SSI_GPR0_IRQ_MASK) {
/* Mask interrupt - will be unmasked in Deferred service
* handler
*/
@@ -131,7 +131,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
}
#endif
/* AXI error interrupt */
- if ((irr & SSI_AXI_ERR_IRQ_MASK)) {
+ if (irr & SSI_AXI_ERR_IRQ_MASK) {
u32 axi_err;
/* Read the AXI error ID */
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index 7a653f6..e890cb6 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -221,7 +221,7 @@ static int request_mgr_queues_status_check(
return -EBUSY;
}
- if ((req_mgr_h->q_free_slots >= total_seq_len))
+ if (req_mgr_h->q_free_slots >= total_seq_len)
return 0;
/* Wait for space in HW queue. Poll constant num of iterations. */
--
2.7.4
* [PATCH 04/10] staging: ccree: fix indentation of var assignment
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (2 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 03/10] staging: ccree: remove more unnecessary parentheses Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 05/10] staging: ccree: remove braces for single statement blocks Gilad Ben-Yossef
` (6 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
Fix indentation of var assignment split across lines for
better readability.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_aead.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 0b085dc..f214df7 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -1238,10 +1238,10 @@ static enum cc_flow_mode ssi_aead_get_data_flow_mode(
} else { /* Decrypt */
if (setup_flow_mode == S_DIN_to_AES)
data_flow_mode = is_single_pass ?
- AES_and_HASH : DIN_AES_DOUT;
+ AES_and_HASH : DIN_AES_DOUT;
else
data_flow_mode = is_single_pass ?
- DES_and_HASH : DIN_DES_DOUT;
+ DES_and_HASH : DIN_DES_DOUT;
}
return data_flow_mode;
--
2.7.4
* [PATCH 05/10] staging: ccree: remove braces for single statement blocks
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (3 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 04/10] staging: ccree: fix indentation of var assignment Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 06/10] staging: ccree: remove interim DT docs Gilad Ben-Yossef
` (5 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
Remove braces from a single-statement if clause.
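This follows the kernel coding style rule that braces are not used
where a single statement will do, i.e. (generic sketch, not driver
code):

    if (cond)
        do_something();

rather than

    if (cond) {
        do_something();
    }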
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_request_mgr.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/staging/ccree/ssi_request_mgr.c b/drivers/staging/ccree/ssi_request_mgr.c
index e890cb6..5f34336 100644
--- a/drivers/staging/ccree/ssi_request_mgr.c
+++ b/drivers/staging/ccree/ssi_request_mgr.c
@@ -228,9 +228,8 @@ static int request_mgr_queues_status_check(
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
- if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) {
+ if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
- }
if (req_mgr_h->q_free_slots >= total_seq_len) {
/* If there is enough place return */
--
2.7.4
* [PATCH 06/10] staging: ccree: remove interim DT docs
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (4 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 05/10] staging: ccree: remove braces for single statement blocks Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 07/10] staging: ccree: update TODO list Gilad Ben-Yossef
` (4 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
As a proper DT bindings doc was submitted and ACKed,
remove the interim one.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
.../devicetree/bindings/crypto/arm-cryptocell.txt | 27 ----------------------
1 file changed, 27 deletions(-)
delete mode 100644 drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
diff --git a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
deleted file mode 100644
index 2ea6517..0000000
--- a/drivers/staging/ccree/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Arm TrustZone CryptoCell cryptographic accelerators
-
-Required properties:
-- compatible: must be "arm,cryptocell-712-ree".
-- reg: shall contain base register location and length.
- Typically length is 0x10000.
-- interrupts: shall contain the interrupt for the device.
-
-Optional properties:
-- interrupt-parent: can designate the interrupt controller the
- device interrupt is connected to, if needed.
-- clocks: may contain the clock handling the device, if needed.
-- power-domains: may contain a reference to the PM domain, if applicable.
-
-
-Examples:
-
-Zynq FPGA device
-----------------
-
- arm_cc7x: arm_cc7x@80000000 {
- compatible = "arm,cryptocell-712-ree";
- interrupt-parent = <&intc>;
- interrupts = < 0 30 4 >;
- reg = < 0x80000000 0x10000 >;
- };
-
--
2.7.4
* [PATCH 07/10] staging: ccree: update TODO list
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (5 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 06/10] staging: ccree: remove interim DT docs Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 08/10] staging: ccree: NULLify backup_info when unused Gilad Ben-Yossef
` (3 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
Update the ccree staging TODO list in light of recent work.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/TODO | 23 ++---------------------
1 file changed, 2 insertions(+), 21 deletions(-)
diff --git a/drivers/staging/ccree/TODO b/drivers/staging/ccree/TODO
index c9f5754..f44edcd 100644
--- a/drivers/staging/ccree/TODO
+++ b/drivers/staging/ccree/TODO
@@ -6,25 +6,6 @@
* *
*************************************************************************
-ccree specific items
-a.k.a stuff fixing for this driver to move out of staging
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+1. Migrate sysfs to debugfs.
+2. Handle HW FIFO fullness more cleanly.
-1. Move to using Crypto Engine to handle backlog queueing.
-2. Remove synchronous algorithm support leftovers.
-3. Separate platform specific code for FIPS and power management into separate platform modules.
-4. Drop legacy kernel support code.
-5. Move most (all?) #ifdef CONFIG into inline functions.
-6. Remove all unused definitions.
-7. Re-factor to accomediate newer/older HW revisions besides the 712.
-8. Handle the many checkpatch errors.
-9. Implement ahash import/export correctly.
-10. Go through a proper review of DT bindings and sysfs ABI
-11. Sort out FIPS mode: bake tests into testmgr, sort out behaviour on error,
- figure if 3DES weak key check is needed
-
-Kernel infrastructure items
-a.k.a stuff we either neither need to fix in the kernel or understand what we're doing wrong
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-1. ahash import/export context has a PAGE_SIZE/8 size limit. We need more.
-2. Crypto Engine seems to be built for HW with hardware queue depth of 1, we have 600++.
--
2.7.4
* [PATCH 08/10] staging: ccree: NULLify backup_info when unused
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (6 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 07/10] staging: ccree: update TODO list Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 09/10] staging: ccree: fix AEAD func naming convention Gilad Ben-Yossef
` (2 subsequent siblings)
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: devel, driverdev-devel, linux-kernel, stable, linux-crypto,
Ofir Drang
The backup_info field is only allocated for the decrypt code path.
The field was not nullified when unused, causing a kfree in an
error handling path to attempt to free random addresses, as
uncovered in stress testing.
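The fix relies on kfree(NULL) being a safe no-op: explicitly
NULLifying the field on the encrypt path (see the hunk below) makes
the shared error handling safe no matter which path built the
request context:

    req_ctx->is_giv = false;
    req_ctx->backup_info = NULL;  /* kfree(NULL) in the error path is a no-op */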
Fixes: 737aed947f9b ("staging: ccree: save ciphertext for CTS IV")
Cc: stable@vger.kernel.org
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_cipher.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 9019615..7b484f1 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -907,6 +907,7 @@ static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
req_ctx->is_giv = false;
+ req_ctx->backup_info = NULL;
return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src,
req->nbytes, req->info, ivsize,
--
2.7.4
* [PATCH 09/10] staging: ccree: fix AEAD func naming convention
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (7 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 08/10] staging: ccree: NULLify backup_info when unused Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-03 13:58 ` [PATCH 10/10] staging: ccree: amend aead func def for readability Gilad Ben-Yossef
2017-12-04 9:42 ` [PATCH 00/10] staging: ccree: cleanups & fixes Dan Carpenter
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
The aead file was using a function naming convention which was
inconsistent (ssi vs. cc), included a useless prefix (ssi_aead) and
often used over-long names, producing monsters such as
ssi_aead_gcm_setup_ghash_desc() that made the call site code hard
to read.
Make the code more readable by switching to a simpler, consistent
naming convention for all the functions defined in the file.
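For example, some of the renames below:

    ssi_aead_create_assoc_desc()          -> cc_set_assoc_desc()
    ssi_aead_process_authenc_data_desc()  -> cc_proc_authen_desc()
    ssi_aead_process_digest_result_desc() -> cc_proc_digest_desc()
    ssi_aead_gcm_setup_ghash_desc()       -> cc_set_ghash_desc()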
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_aead.c | 544 ++++++++++++++++++-------------------
drivers/staging/ccree/ssi_aead.h | 4 +-
drivers/staging/ccree/ssi_driver.c | 6 +-
3 files changed, 273 insertions(+), 281 deletions(-)
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index f214df7..54edd99 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -51,7 +51,7 @@
/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01
-struct ssi_aead_handle {
+struct cc_aead_handle {
ssi_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
};
@@ -68,7 +68,7 @@ struct cc_xcbc_s {
dma_addr_t xcbc_keys_dma_addr;
};
-struct ssi_aead_ctx {
+struct cc_aead_ctx {
struct ssi_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
@@ -90,9 +90,9 @@ static inline bool valid_assoclen(struct aead_request *req)
return ((req->assoclen == 16) || (req->assoclen == 20));
}
-static void ssi_aead_exit(struct crypto_aead *tfm)
+static void cc_aead_exit(struct crypto_aead *tfm)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
@@ -144,10 +144,10 @@ static void ssi_aead_exit(struct crypto_aead *tfm)
}
}
-static int ssi_aead_init(struct crypto_aead *tfm)
+static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct ssi_crypto_alg *ssi_alg =
container_of(alg, struct ssi_crypto_alg, aead_alg);
struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
@@ -222,16 +222,16 @@ static int ssi_aead_init(struct crypto_aead *tfm)
return 0;
init_failed:
- ssi_aead_exit(tfm);
+ cc_aead_exit(tfm);
return -ENOMEM;
}
-static void ssi_aead_complete(struct device *dev, void *ssi_req)
+static void cc_aead_complete(struct device *dev, void *ssi_req)
{
struct aead_request *areq = (struct aead_request *)ssi_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
int err = 0;
cc_unmap_aead_request(dev, areq);
@@ -277,7 +277,7 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req)
aead_request_complete(areq, err);
}
-static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
/* Load the AES key */
hw_desc_init(&desc[0]);
@@ -317,7 +317,7 @@ static int xcbc_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return 4;
}
-static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
+static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
unsigned int digest_ofs = 0;
@@ -386,7 +386,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct ssi_aead_ctx *ctx)
return idx;
}
-static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
+static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -435,11 +435,11 @@ static int validate_keys_sizes(struct ssi_aead_ctx *ctx)
* (copy to intenral buffer or hash in case of key longer than block
*/
static int
-ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
struct ssi_crypto_req ssi_req = {};
@@ -557,9 +557,9 @@ ssi_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
static int
-ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
struct ssi_crypto_req ssi_req = {};
struct crypto_authenc_key_param *param;
@@ -619,7 +619,7 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
- rc = ssi_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+ rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
if (rc)
goto badkey;
}
@@ -663,10 +663,10 @@ ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
}
#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
if (keylen < 3)
return -EINVAL;
@@ -674,15 +674,15 @@ static int ssi_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
keylen -= 3;
memcpy(ctx->ctr_nonce, key + keylen, 3);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
#endif /*SSI_CC_HAS_AES_CCM*/
-static int ssi_aead_setauthsize(
+static int cc_aead_setauthsize(
struct crypto_aead *authenc,
unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
/* Unsupported auth. sizes */
@@ -698,8 +698,8 @@ static int ssi_aead_setauthsize(
}
#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 8:
@@ -710,11 +710,11 @@ static int ssi_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_ccm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -729,19 +729,19 @@ static int ssi_ccm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
#endif /*SSI_CC_HAS_AES_CCM*/
static void
-ssi_aead_create_assoc_desc(
+cc_set_assoc_desc(
struct aead_request *areq,
unsigned int flow_mode,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
@@ -777,7 +777,7 @@ ssi_aead_create_assoc_desc(
}
static void
-ssi_aead_process_authenc_data_desc(
+cc_proc_authen_desc(
struct aead_request *areq,
unsigned int flow_mode,
struct cc_hw_desc desc[],
@@ -788,7 +788,7 @@ ssi_aead_process_authenc_data_desc(
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
switch (data_dma_type) {
@@ -844,7 +844,7 @@ ssi_aead_process_authenc_data_desc(
}
static void
-ssi_aead_process_cipher_data_desc(
+cc_proc_cipher_desc(
struct aead_request *areq,
unsigned int flow_mode,
struct cc_hw_desc desc[],
@@ -854,7 +854,7 @@ ssi_aead_process_cipher_data_desc(
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (areq_ctx->cryptlen == 0)
@@ -891,13 +891,13 @@ ssi_aead_process_cipher_data_desc(
*seq_size = (++idx);
}
-static void ssi_aead_process_digest_result_desc(
+static void cc_proc_digest_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -942,13 +942,13 @@ static void ssi_aead_process_digest_result_desc(
*seq_size = (++idx);
}
-static void ssi_aead_setup_cipher_desc(
+static void cc_set_cipher_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = req_ctx->hw_iv_size;
unsigned int idx = *seq_size;
@@ -988,7 +988,7 @@ static void ssi_aead_setup_cipher_desc(
*seq_size = idx;
}
-static void ssi_aead_process_cipher(
+static void cc_proc_cipher(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size,
@@ -1001,8 +1001,8 @@ static void ssi_aead_process_cipher(
if (req_ctx->cryptlen == 0)
return; /*null processing*/
- ssi_aead_setup_cipher_desc(req, desc, &idx);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc, &idx);
+ cc_set_cipher_desc(req, desc, &idx);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* We must wait for DMA to write all cipher */
hw_desc_init(&desc[idx]);
@@ -1014,13 +1014,13 @@ static void ssi_aead_process_cipher(
*seq_size = idx;
}
-static void ssi_aead_hmac_setup_digest_desc(
+static void cc_set_hmac_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1051,13 +1051,13 @@ static void ssi_aead_hmac_setup_digest_desc(
*seq_size = idx;
}
-static void ssi_aead_xcbc_setup_digest_desc(
+static void cc_set_xcbc_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
unsigned int idx = *seq_size;
/* Loading MAC state */
@@ -1113,7 +1113,7 @@ static void ssi_aead_xcbc_setup_digest_desc(
*seq_size = idx;
}
-static void ssi_aead_process_digest_header_desc(
+static void cc_proc_header_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1121,20 +1121,20 @@ static void ssi_aead_process_digest_header_desc(
unsigned int idx = *seq_size;
/* Hash associated data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
*seq_size = idx;
}
-static void ssi_aead_process_digest_scheme_desc(
+static void cc_proc_scheme_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_aead_handle *aead_handle = ctx->drvdata->aead_handle;
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
@@ -1192,14 +1192,14 @@ static void ssi_aead_process_digest_scheme_desc(
*seq_size = idx;
}
-static void ssi_aead_load_mlli_to_sram(
+static void cc_mlli_to_sram(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
if (req_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI ||
@@ -1221,7 +1221,7 @@ static void ssi_aead_load_mlli_to_sram(
}
}
-static enum cc_flow_mode ssi_aead_get_data_flow_mode(
+static enum cc_flow_mode cc_get_data_flow(
enum drv_crypto_direction direct,
enum cc_flow_mode setup_flow_mode,
bool is_single_pass)
@@ -1247,29 +1247,28 @@ static enum cc_flow_mode ssi_aead_get_data_flow_mode(
return data_flow_mode;
}
-static void ssi_aead_hmac_authenc(
+static void cc_hmac_authenc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
+ unsigned int data_flow_mode = cc_get_data_flow(
direct, ctx->flow_mode, req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc,
- seq_size);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
@@ -1280,52 +1279,49 @@ static void ssi_aead_hmac_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
- seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first..*/
- ssi_aead_hmac_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
- seq_size, direct);
- ssi_aead_process_digest_scheme_desc(req, desc, seq_size);
+ cc_set_hmac_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_scheme_desc(req, desc, seq_size);
/* decrypt after.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
static void
-ssi_aead_xcbc_authenc(
+cc_xcbc_authenc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
- unsigned int data_flow_mode = ssi_aead_get_data_flow_mode(
+ unsigned int data_flow_mode = cc_get_data_flow(
direct, ctx->flow_mode, req_ctx->is_single_pass);
if (req_ctx->is_single_pass) {
/**
* Single-pass flow
*/
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_setup_cipher_desc(req, desc, seq_size);
- ssi_aead_process_digest_header_desc(req, desc, seq_size);
- ssi_aead_process_cipher_data_desc(req, data_flow_mode, desc,
- seq_size);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_set_cipher_desc(req, desc, seq_size);
+ cc_proc_header_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
return;
}
@@ -1336,27 +1332,25 @@ ssi_aead_xcbc_authenc(
*/
if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
/* encrypt first.. */
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* authenc after.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
- seq_size, direct);
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
+ cc_proc_digest_desc(req, desc, seq_size);
} else { /*DECRYPT*/
/* authenc first.. */
- ssi_aead_xcbc_setup_digest_desc(req, desc, seq_size);
- ssi_aead_process_authenc_data_desc(req, DIN_HASH, desc,
- seq_size, direct);
+ cc_set_xcbc_desc(req, desc, seq_size);
+ cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
/* decrypt after..*/
- ssi_aead_process_cipher(req, desc, seq_size, data_flow_mode);
+ cc_proc_cipher(req, desc, seq_size, data_flow_mode);
/* read the digest result with setting the completion bit
* must be after the cipher operation
*/
- ssi_aead_process_digest_result_desc(req, desc, seq_size);
+ cc_proc_digest_desc(req, desc, seq_size);
}
}
-static int validate_data_size(struct ssi_aead_ctx *ctx,
+static int validate_data_size(struct cc_aead_ctx *ctx,
enum drv_crypto_direction direct,
struct aead_request *req)
{
@@ -1455,13 +1449,13 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
return 0;
}
-static int ssi_aead_ccm(
+static int cc_ccm(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
unsigned int cipher_flow_mode;
@@ -1525,7 +1519,7 @@ static int ssi_aead_ccm(
/* process assoc data */
if (req->assoclen > 0) {
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
+ cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI,
@@ -1537,8 +1531,7 @@ static int ssi_aead_ccm(
/* process the cipher */
if (req_ctx->cryptlen)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc,
- &idx);
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
/* Read temporal MAC */
hw_desc_init(&desc[idx]);
@@ -1583,7 +1576,7 @@ static int ssi_aead_ccm(
static int config_ccm_adata(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
//unsigned int size_of_a = 0, rem_a_size = 0;
@@ -1639,10 +1632,10 @@ static int config_ccm_adata(struct aead_request *req)
return 0;
}
-static void ssi_rfc4309_ccm_process(struct aead_request *req)
+static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
/* L' */
@@ -1666,13 +1659,13 @@ static void ssi_rfc4309_ccm_process(struct aead_request *req)
#if SSI_CC_HAS_AES_GCM
-static void ssi_aead_gcm_setup_ghash_desc(
+static void cc_set_ghash_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1746,13 +1739,13 @@ static void ssi_aead_gcm_setup_ghash_desc(
*seq_size = idx;
}
-static void ssi_aead_gcm_setup_gctr_desc(
+static void cc_set_gctr_desc(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
@@ -1784,13 +1777,13 @@ static void ssi_aead_gcm_setup_gctr_desc(
*seq_size = idx;
}
-static void ssi_aead_process_gcm_result_desc(
+static void cc_proc_gcm_result(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
dma_addr_t mac_result;
unsigned int idx = *seq_size;
@@ -1850,7 +1843,7 @@ static void ssi_aead_process_gcm_result_desc(
*seq_size = idx;
}
-static int ssi_aead_gcm(
+static int cc_gcm(
struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
@@ -1866,37 +1859,36 @@ static int ssi_aead_gcm(
//in RFC4543 no data to encrypt. just copy data from src to dest.
if (req_ctx->plaintext_authenticate_only) {
- ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
// for gcm and rfc4106.
- ssi_aead_gcm_setup_ghash_desc(req, desc, seq_size);
+ cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
if (req->assoclen > 0)
- ssi_aead_create_assoc_desc(req, DIN_HASH, desc, seq_size);
- ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
+ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
+ cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
if (req_ctx->cryptlen)
- ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc,
- seq_size);
- ssi_aead_process_gcm_result_desc(req, desc, seq_size);
+ cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
+ cc_proc_gcm_result(req, desc, seq_size);
return 0;
}
#ifdef CC_DEBUG
-static void ssi_aead_dump_gcm(
+static void cc_dump_gcm(
const char *title,
struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
if (ctx->cipher_mode != DRV_CIPHER_GCTR)
@@ -1940,7 +1932,7 @@ static void ssi_aead_dump_gcm(
static int config_gcm_context(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -1987,10 +1979,10 @@ static int config_gcm_context(struct aead_request *req)
return 0;
}
-static void ssi_rfc4_gcm_process(struct aead_request *req)
+static void cc_proc_rfc4_gcm(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
@@ -2003,14 +1995,14 @@ static void ssi_rfc4_gcm_process(struct aead_request *req)
#endif /*SSI_CC_HAS_AES_GCM*/
-static int ssi_aead_process(struct aead_request *req,
- enum drv_crypto_direction direct)
+static int cc_proc_aead(struct aead_request *req,
+ enum drv_crypto_direction direct)
{
int rc = 0;
int seq_len = 0;
struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct ssi_crypto_req ssi_req = {};
@@ -2031,7 +2023,7 @@ static int ssi_aead_process(struct aead_request *req,
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)ssi_aead_complete;
+ ssi_req.user_cb = (void *)cc_aead_complete;
ssi_req.user_arg = (void *)req;
/* Setup request context */
@@ -2138,26 +2130,26 @@ static int ssi_aead_process(struct aead_request *req,
/* STAT_PHASE_2: Create sequence */
/* Load MLLI tables to SRAM if necessary */
- ssi_aead_load_mlli_to_sram(req, desc, &seq_len);
+ cc_mlli_to_sram(req, desc, &seq_len);
/*TODO: move seq len by reference */
switch (ctx->auth_mode) {
case DRV_HASH_SHA1:
case DRV_HASH_SHA256:
- ssi_aead_hmac_authenc(req, desc, &seq_len);
+ cc_hmac_authenc(req, desc, &seq_len);
break;
case DRV_HASH_XCBC_MAC:
- ssi_aead_xcbc_authenc(req, desc, &seq_len);
+ cc_xcbc_authenc(req, desc, &seq_len);
break;
#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
case DRV_HASH_NULL:
#if SSI_CC_HAS_AES_CCM
if (ctx->cipher_mode == DRV_CIPHER_CCM)
- ssi_aead_ccm(req, desc, &seq_len);
+ cc_ccm(req, desc, &seq_len);
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
if (ctx->cipher_mode == DRV_CIPHER_GCTR)
- ssi_aead_gcm(req, desc, &seq_len);
+ cc_gcm(req, desc, &seq_len);
#endif /*SSI_CC_HAS_AES_GCM*/
break;
#endif
@@ -2181,7 +2173,7 @@ static int ssi_aead_process(struct aead_request *req,
return rc;
}
-static int ssi_aead_encrypt(struct aead_request *req)
+static int cc_aead_encrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2193,7 +2185,7 @@ static int ssi_aead_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@@ -2201,13 +2193,13 @@ static int ssi_aead_encrypt(struct aead_request *req)
}
#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int rc = -EINVAL;
@@ -2221,9 +2213,9 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
out:
@@ -2231,7 +2223,7 @@ static int ssi_rfc4309_ccm_encrypt(struct aead_request *req)
}
#endif /* SSI_CC_HAS_AES_CCM */
-static int ssi_aead_decrypt(struct aead_request *req)
+static int cc_aead_decrypt(struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2243,7 +2235,7 @@ static int ssi_aead_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@@ -2251,10 +2243,10 @@ static int ssi_aead_decrypt(struct aead_request *req)
}
#if SSI_CC_HAS_AES_CCM
-static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
+static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2269,9 +2261,9 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
- ssi_rfc4309_ccm_process(req);
+ cc_proc_rfc4309_ccm(req);
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@@ -2282,10 +2274,10 @@ static int ssi_rfc4309_ccm_decrypt(struct aead_request *req)
#if SSI_CC_HAS_AES_GCM
-static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2296,13 +2288,13 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen)
+static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);
@@ -2313,11 +2305,11 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
keylen -= 4;
memcpy(ctx->ctr_nonce, key + keylen, 4);
- return ssi_aead_setkey(tfm, key, keylen);
+ return cc_aead_setkey(tfm, key, keylen);
}
-static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
switch (authsize) {
case 4:
@@ -2332,13 +2324,13 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2352,13 +2344,13 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
return -EINVAL;
}
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(authenc);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
dev_dbg(dev, "authsize %d\n", authsize);
@@ -2366,15 +2358,15 @@ static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
if (authsize != 16)
return -EINVAL;
- return ssi_aead_setauthsize(authenc, authsize);
+ return cc_aead_setauthsize(authenc, authsize);
}
-static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2390,19 +2382,19 @@ static int ssi_rfc4106_gcm_encrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_encrypt() above. */
+ /* Very similar to cc_aead_encrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2414,22 +2406,22 @@ static int ssi_rfc4543_gcm_encrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
return rc;
}
-static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = -EINVAL;
@@ -2445,19 +2437,19 @@ static int ssi_rfc4106_gcm_decrypt(struct aead_request *req)
areq_ctx->plaintext_authenticate_only = false;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
out:
return rc;
}
-static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
+static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
- /* Very similar to ssi_aead_decrypt() above. */
+ /* Very similar to cc_aead_decrypt() above. */
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
@@ -2469,10 +2461,10 @@ static int ssi_rfc4543_gcm_decrypt(struct aead_request *req)
areq_ctx->backup_iv = req->iv;
areq_ctx->backup_giv = NULL;
- ssi_rfc4_gcm_process(req);
+ cc_proc_rfc4_gcm(req);
areq_ctx->is_gcm4543 = true;
- rc = ssi_aead_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+ rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
if (rc != -EINPROGRESS)
req->iv = areq_ctx->backup_iv;
@@ -2488,12 +2480,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2507,12 +2499,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2526,12 +2518,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2545,12 +2537,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = DES3_EDE_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2564,12 +2556,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = AES_BLOCK_SIZE,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2583,12 +2575,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -2602,12 +2594,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -2621,12 +2613,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_aead_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_aead_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2641,12 +2633,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_ccm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_ccm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2660,12 +2652,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4309_ccm_setkey,
- .setauthsize = ssi_rfc4309_ccm_setauthsize,
- .encrypt = ssi_rfc4309_ccm_encrypt,
- .decrypt = ssi_rfc4309_ccm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4309_ccm_setkey,
+ .setauthsize = cc_rfc4309_ccm_setauthsize,
+ .encrypt = cc_rfc4309_ccm_encrypt,
+ .decrypt = cc_rfc4309_ccm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = CCM_BLOCK_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2681,12 +2673,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_aead_setkey,
- .setauthsize = ssi_gcm_setauthsize,
- .encrypt = ssi_aead_encrypt,
- .decrypt = ssi_aead_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_aead_setkey,
+ .setauthsize = cc_gcm_setauthsize,
+ .encrypt = cc_aead_encrypt,
+ .decrypt = cc_aead_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = 12,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2700,12 +2692,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4106_gcm_setkey,
- .setauthsize = ssi_rfc4106_gcm_setauthsize,
- .encrypt = ssi_rfc4106_gcm_encrypt,
- .decrypt = ssi_rfc4106_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4106_gcm_setkey,
+ .setauthsize = cc_rfc4106_gcm_setauthsize,
+ .encrypt = cc_rfc4106_gcm_encrypt,
+ .decrypt = cc_rfc4106_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2719,12 +2711,12 @@ static struct ssi_alg_template aead_algs[] = {
.blocksize = 1,
.type = CRYPTO_ALG_TYPE_AEAD,
.template_aead = {
- .setkey = ssi_rfc4543_gcm_setkey,
- .setauthsize = ssi_rfc4543_gcm_setauthsize,
- .encrypt = ssi_rfc4543_gcm_encrypt,
- .decrypt = ssi_rfc4543_gcm_decrypt,
- .init = ssi_aead_init,
- .exit = ssi_aead_exit,
+ .setkey = cc_rfc4543_gcm_setkey,
+ .setauthsize = cc_rfc4543_gcm_setauthsize,
+ .encrypt = cc_rfc4543_gcm_encrypt,
+ .decrypt = cc_rfc4543_gcm_decrypt,
+ .init = cc_aead_init,
+ .exit = cc_aead_exit,
.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
@@ -2735,7 +2727,7 @@ static struct ssi_alg_template aead_algs[] = {
#endif /*SSI_CC_HAS_AES_GCM*/
};
-static struct ssi_crypto_alg *ssi_aead_create_alg(
+static struct ssi_crypto_alg *cc_create_aead_alg(
struct ssi_alg_template *template,
struct device *dev)
{
@@ -2755,11 +2747,11 @@ static struct ssi_crypto_alg *ssi_aead_create_alg(
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = SSI_CRA_PRIO;
- alg->base.cra_ctxsize = sizeof(struct ssi_aead_ctx);
+ alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
template->type;
- alg->init = ssi_aead_init;
- alg->exit = ssi_aead_exit;
+ alg->init = cc_aead_init;
+ alg->exit = cc_aead_exit;
t_alg->aead_alg = *alg;
@@ -2770,11 +2762,11 @@ static struct ssi_crypto_alg *ssi_aead_create_alg(
return t_alg;
}
-int ssi_aead_free(struct ssi_drvdata *drvdata)
+int cc_aead_free(struct ssi_drvdata *drvdata)
{
struct ssi_crypto_alg *t_alg, *n;
- struct ssi_aead_handle *aead_handle =
- (struct ssi_aead_handle *)drvdata->aead_handle;
+ struct cc_aead_handle *aead_handle =
+ (struct cc_aead_handle *)drvdata->aead_handle;
if (aead_handle) {
/* Remove registered algs */
@@ -2791,9 +2783,9 @@ int ssi_aead_free(struct ssi_drvdata *drvdata)
return 0;
}
-int ssi_aead_alloc(struct ssi_drvdata *drvdata)
+int cc_aead_alloc(struct ssi_drvdata *drvdata)
{
- struct ssi_aead_handle *aead_handle;
+ struct cc_aead_handle *aead_handle;
struct ssi_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
@@ -2819,7 +2811,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- t_alg = ssi_aead_create_alg(&aead_algs[alg], dev);
+ t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
if (IS_ERR(t_alg)) {
rc = PTR_ERR(t_alg);
dev_err(dev, "%s alg allocation failed\n",
@@ -2844,7 +2836,7 @@ int ssi_aead_alloc(struct ssi_drvdata *drvdata)
fail2:
kfree(t_alg);
fail1:
- ssi_aead_free(drvdata);
+ cc_aead_free(drvdata);
fail0:
return rc;
}
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index 4e29063..5172241 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -116,7 +116,7 @@ struct aead_req_ctx {
bool plaintext_authenticate_only; //for gcm_rfc4543
};
-int ssi_aead_alloc(struct ssi_drvdata *drvdata);
-int ssi_aead_free(struct ssi_drvdata *drvdata);
+int cc_aead_alloc(struct ssi_drvdata *drvdata);
+int cc_aead_free(struct ssi_drvdata *drvdata);
#endif /*__SSI_AEAD_H__*/
diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
index 0b2593f..041624f 100644
--- a/drivers/staging/ccree/ssi_driver.c
+++ b/drivers/staging/ccree/ssi_driver.c
@@ -365,9 +365,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
goto post_cipher_err;
}
- rc = ssi_aead_alloc(new_drvdata);
+ rc = cc_aead_alloc(new_drvdata);
if (rc) {
- dev_err(dev, "ssi_aead_alloc failed\n");
+ dev_err(dev, "cc_aead_alloc failed\n");
goto post_hash_err;
}
@@ -417,7 +417,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
struct ssi_drvdata *drvdata =
(struct ssi_drvdata *)platform_get_drvdata(plat_dev);
- ssi_aead_free(drvdata);
+ cc_aead_free(drvdata);
ssi_hash_free(drvdata);
ssi_ablkcipher_free(drvdata);
ssi_ivgen_fini(drvdata);
--
2.7.4
* [PATCH 10/10] staging: ccree: amend aead func def for readability
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (8 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 09/10] staging: ccree: fix AEAD func naming convention Gilad Ben-Yossef
@ 2017-12-03 13:58 ` Gilad Ben-Yossef
2017-12-04 9:42 ` [PATCH 00/10] staging: ccree: cleanups & fixes Dan Carpenter
10 siblings, 0 replies; 16+ messages in thread
From: Gilad Ben-Yossef @ 2017-12-03 13:58 UTC (permalink / raw)
To: Greg Kroah-Hartman
Cc: linux-crypto, devel, driverdev-devel, linux-kernel, Ofir Drang
Function definitions in the aead implementation did not adhere to the
kernel coding style. Fix them for better readability.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
drivers/staging/ccree/ssi_aead.c | 141 ++++++++++++++-------------------------
1 file changed, 51 insertions(+), 90 deletions(-)
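As a minimal sketch of the rule being applied (cc_style_demo() and its
parameters are hypothetical, not code from the driver), the preferred form
packs parameters onto as few lines as fit within 80 columns and aligns
continuation lines with the opening parenthesis:

struct aead_request;
struct cc_hw_desc;

/* Dangling style the patch removes: lone paren, one parameter per line */
static int cc_style_demo(
	struct aead_request *req,
	struct cc_hw_desc desc[],
	unsigned int *seq_size);

/* Preferred style: continuation lines aligned under the open paren */
static int cc_style_demo(struct aead_request *req, struct cc_hw_desc desc[],
                         unsigned int *seq_size);

With --strict, checkpatch.pl typically flags the former layout with a
"Alignment should match open parenthesis" check, which is what the hunks
below address.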
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index 54edd99..5548c7b 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -678,9 +678,8 @@ static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
}
#endif /*SSI_CC_HAS_AES_CCM*/
-static int cc_aead_setauthsize(
- struct crypto_aead *authenc,
- unsigned int authsize)
+static int cc_aead_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -733,12 +732,8 @@ static int cc_ccm_setauthsize(struct crypto_aead *authenc,
}
#endif /*SSI_CC_HAS_AES_CCM*/
-static void
-cc_set_assoc_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -776,13 +771,10 @@ cc_set_assoc_desc(
*seq_size = (++idx);
}
-static void
-cc_proc_authen_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- int direct)
+static void cc_proc_authen_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size, int direct)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
@@ -843,12 +835,10 @@ cc_proc_authen_desc(
*seq_size = (++idx);
}
-static void
-cc_proc_cipher_desc(
- struct aead_request *areq,
- unsigned int flow_mode,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_cipher_desc(struct aead_request *areq,
+ unsigned int flow_mode,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
@@ -891,10 +881,9 @@ cc_proc_cipher_desc(
*seq_size = (++idx);
}
-static void cc_proc_digest_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_digest_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -942,10 +931,9 @@ static void cc_proc_digest_desc(
*seq_size = (++idx);
}
-static void cc_set_cipher_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_cipher_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -988,11 +976,8 @@ static void cc_set_cipher_desc(
*seq_size = idx;
}
-static void cc_proc_cipher(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size,
- unsigned int data_flow_mode)
+static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size, unsigned int data_flow_mode)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
int direct = req_ctx->gen_ctx.op_type;
@@ -1014,10 +999,8 @@ static void cc_proc_cipher(
*seq_size = idx;
}
-static void cc_set_hmac_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1051,10 +1034,8 @@ static void cc_set_hmac_desc(
*seq_size = idx;
}
-static void cc_set_xcbc_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1113,10 +1094,9 @@ static void cc_set_xcbc_desc(
*seq_size = idx;
}
-static void cc_proc_header_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_header_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
unsigned int idx = *seq_size;
/* Hash associated data */
@@ -1127,10 +1107,9 @@ static void cc_proc_header_desc(
*seq_size = idx;
}
-static void cc_proc_scheme_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_scheme_desc(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1192,10 +1171,8 @@ static void cc_proc_scheme_desc(
*seq_size = idx;
}
-static void cc_mlli_to_sram(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_mlli_to_sram(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -1221,10 +1198,9 @@ static void cc_mlli_to_sram(
}
}
-static enum cc_flow_mode cc_get_data_flow(
- enum drv_crypto_direction direct,
- enum cc_flow_mode setup_flow_mode,
- bool is_single_pass)
+static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
+ enum cc_flow_mode setup_flow_mode,
+ bool is_single_pass)
{
enum cc_flow_mode data_flow_mode;
@@ -1247,10 +1223,8 @@ static enum cc_flow_mode cc_get_data_flow(
return data_flow_mode;
}
-static void cc_hmac_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1301,10 +1275,8 @@ static void cc_hmac_authenc(
}
static void
-cc_xcbc_authenc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1449,10 +1421,8 @@ static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
return 0;
}
-static int cc_ccm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1659,10 +1629,8 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
#if SSI_CC_HAS_AES_GCM
-static void cc_set_ghash_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_ghash_desc(struct aead_request *req,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1739,10 +1707,8 @@ static void cc_set_ghash_desc(
*seq_size = idx;
}
-static void cc_set_gctr_desc(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1777,10 +1743,9 @@ static void cc_set_gctr_desc(
*seq_size = idx;
}
-static void cc_proc_gcm_result(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static void cc_proc_gcm_result(struct aead_request *req,
+ struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1843,10 +1808,8 @@ static void cc_proc_gcm_result(
*seq_size = idx;
}
-static int cc_gcm(
- struct aead_request *req,
- struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
{
struct aead_req_ctx *req_ctx = aead_request_ctx(req);
unsigned int cipher_flow_mode;
@@ -1883,9 +1846,7 @@ static int cc_gcm(
}
#ifdef CC_DEBUG
-static void cc_dump_gcm(
- const char *title,
- struct aead_request *req)
+static void cc_dump_gcm(const char *title, struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
--
2.7.4
* Re: [PATCH 00/10] staging: ccree: cleanups & fixes
2017-12-03 13:58 [PATCH 00/10] staging: ccree: cleanups & fixes Gilad Ben-Yossef
` (9 preceding siblings ...)
2017-12-03 13:58 ` [PATCH 10/10] staging: ccree: amend aead func def for readability Gilad Ben-Yossef
@ 2017-12-04 9:42 ` Dan Carpenter
10 siblings, 0 replies; 16+ messages in thread
From: Dan Carpenter @ 2017-12-04 9:42 UTC (permalink / raw)
To: Gilad Ben-Yossef
Cc: devel, Greg Kroah-Hartman, driverdev-devel, linux-kernel,
linux-crypto, Ofir Drang
Looks good. Thanks!
regards,
dan carpenter