crypto/fsl - Add progressive hashing support using hardware acceleration.

Currently only single-shot hashing is supported with hardware acceleration.
Add support for progressive hashing using hardware.
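
For reference, a minimal caller-side sketch of the progressive hashing
interface (illustrative only: it assumes the hash_init, hash_update and
hash_finish members added to struct hash_algo, the existing
hash_lookup_algo() helper in common/hash.c, and a hypothetical
sha256_prog_example() caller):

	#include <hash.h>

	static int sha256_prog_example(const void *buf, uint len)
	{
		struct hash_algo *algo;
		u8 digest[HASH_MAX_DIGEST_SIZE];
		void *ctx;
		int ret;

		ret = hash_lookup_algo("sha256", &algo);
		if (ret)
			return ret;

		ret = algo->hash_init(algo, &ctx);
		if (ret)
			return ret;

		/* Stream the data in chunks; pass is_last = 1 with the last one */
		algo->hash_update(algo, ctx, buf, len / 2, 0);
		algo->hash_update(algo, ctx, (const u8 *)buf + len / 2,
				  len - len / 2, 1);

		/* Copies the digest out and frees the context */
		return algo->hash_finish(algo, ctx, digest, algo->digest_size);
	}

hash_finish() both copies the digest and frees the context, so the caller
needs no explicit cleanup on the success path.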

Signed-off-by: Ruchika Gupta <ruchika.gupta@freescale.com>
Signed-off-by: Gaurav Rana <gaurav.rana@freescale.com>
CC: Simon Glass <sjg@chromium.org>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: York Sun <yorksun@freescale.com>
diff --git a/Kconfig b/Kconfig
index 75bab7f..fb012cf 100644
--- a/Kconfig
+++ b/Kconfig
@@ -141,7 +141,9 @@
 	select RSA
 	help
 	  This option enables signature verification of FIT uImages,
-	  using a hash signed and verified using RSA.
+	  using a hash signed and verified using RSA. If
+	  CONFIG_SHA_PROG_HW_ACCEL is defined, i.e. hardware support for
+	  progressive hashing is available, the RSA library will use it.
 	  See doc/uImage.FIT/signature.txt for more details.
 
 config SYS_EXTRA_OPTIONS
diff --git a/README b/README
index 6800b95..febefb5 100644
--- a/README
+++ b/README
@@ -3152,8 +3152,18 @@
 		Enable the hash verify command (hash -v). This adds to code
 		size a little.
 
-		CONFIG_SHA1 - support SHA1 hashing
-		CONFIG_SHA256 - support SHA256 hashing
+		CONFIG_SHA1 - This option enables support for hashing using
+		the SHA1 algorithm. The hash is calculated in software.
+		CONFIG_SHA256 - This option enables support for hashing using
+		the SHA256 algorithm. The hash is calculated in software.
+		CONFIG_SHA_HW_ACCEL - This option enables hardware acceleration
+		for SHA1/SHA256 hashing.
+		This affects the 'hash' command and also the
+		hash_lookup_algo() function.
+		CONFIG_SHA_PROG_HW_ACCEL - This option enables
+		hardware acceleration for SHA1/SHA256 progressive hashing.
+		Data can be streamed in a block at a time and the hashing
+		is performed in hardware.
 
 		Note: There is also a sha1sum command, which should perhaps
 		be deprecated in favour of 'hash sha1'.
@@ -3447,8 +3457,10 @@
 
 		CONFIG_FIT_SIGNATURE
 		This option enables signature verification of FIT uImages,
-		using a hash signed and verified using RSA. See
-		doc/uImage.FIT/signature.txt for more details.
+		using a hash signed and verified using RSA. If
+		CONFIG_SHA_PROG_HW_ACCEL is defined, i.e. hardware support for
+		progressive hashing is available, the RSA library will use it.
+		See doc/uImage.FIT/signature.txt for more details.
 
 		WARNING: When relying on signed FIT images with required
 		signature check the legacy image format is default
diff --git a/common/hash.c b/common/hash.c
index d154d02..9e9f84b 100644
--- a/common/hash.c
+++ b/common/hash.c
@@ -127,11 +127,21 @@
 		SHA1_SUM_LEN,
 		hw_sha1,
 		CHUNKSZ_SHA1,
+#ifdef CONFIG_SHA_PROG_HW_ACCEL
+		hw_sha_init,
+		hw_sha_update,
+		hw_sha_finish,
+#endif
 	}, {
 		"sha256",
 		SHA256_SUM_LEN,
 		hw_sha256,
 		CHUNKSZ_SHA256,
+#ifdef CONFIG_SHA_PROG_HW_ACCEL
+		hw_sha_init,
+		hw_sha_update,
+		hw_sha_finish,
+#endif
 	},
 #endif
 #ifdef CONFIG_SHA1
diff --git a/drivers/crypto/fsl/fsl_hash.c b/drivers/crypto/fsl/fsl_hash.c
index d77f257..c298404 100644
--- a/drivers/crypto/fsl/fsl_hash.c
+++ b/drivers/crypto/fsl/fsl_hash.c
@@ -10,6 +10,9 @@
 #include "jobdesc.h"
 #include "desc.h"
 #include "jr.h"
+#include "fsl_hash.h"
+#include <hw_sha.h>
+#include <asm-generic/errno.h>
 
 #define CRYPTO_MAX_ALG_NAME	80
 #define SHA1_DIGEST_SIZE        20
@@ -39,6 +42,122 @@
 	},
 };
 
+static enum caam_hash_algos get_hash_type(struct hash_algo *algo)
+{
+	if (!strcmp(algo->name, driver_hash[SHA1].name))
+		return SHA1;
+	else
+		return SHA256;
+}
+
+/* Create the context for progressive hashing using h/w acceleration.
+ *
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -ENOMEM on error
+ */
+static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo)
+{
+	*ctxp = calloc(1, sizeof(struct sha_ctx));
+	if (*ctxp == NULL) {
+		debug("Cannot allocate memory for context\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/*
+ * Update sg table for progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ * We support at most 32 Scatter/Gather Entries.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_update(void *hash_ctx, const void *buf,
+			    unsigned int size, int is_last,
+			    enum caam_hash_algos caam_algo)
+{
+	uint32_t final = 0;
+	dma_addr_t addr = virt_to_phys((void *)buf);
+	struct sha_ctx *ctx = hash_ctx;
+
+	if (ctx->sg_num >= MAX_SG_32) {
+		free(ctx);
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_PHYS_64BIT
+	ctx->sg_tbl[ctx->sg_num].addr_hi = addr >> 32;
+#else
+	ctx->sg_tbl[ctx->sg_num].addr_hi = 0x0;
+#endif
+	ctx->sg_tbl[ctx->sg_num].addr_lo = addr;
+
+	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
+		  (size & SG_ENTRY_LENGTH_MASK));
+
+	ctx->sg_num++;
+
+	if (is_last) {
+		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
+			SG_ENTRY_FINAL_BIT;
+		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
+	}
+
+	return 0;
+}
+
+/*
+ * Perform progressive hashing on the given buffer and copy the hash to
+ * the destination buffer.
+ *
+ * The context is freed after the hash operation completes or on error.
+ *
+ * @hash_ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where the hash is to be copied
+ * @size: Size of the destination buffer
+ * @caam_algo: Enum for SHA1 or SHA256
+ * @return 0 if ok, -EINVAL on error
+ */
+static int caam_hash_finish(void *hash_ctx, void *dest_buf,
+			    int size, enum caam_hash_algos caam_algo)
+{
+	uint32_t len = 0;
+	struct sha_ctx *ctx = hash_ctx;
+	int i = 0, ret = 0;
+
+	if (size < driver_hash[caam_algo].digestsize) {
+		free(ctx);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ctx->sg_num; i++)
+		len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
+			SG_ENTRY_LENGTH_MASK);
+
+	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
+				  ctx->hash,
+				  driver_hash[caam_algo].alg_type,
+				  driver_hash[caam_algo].digestsize,
+				  1);
+
+	ret = run_descriptor_jr(ctx->sha_desc);
+
+	if (ret)
+		debug("Error %x\n", ret);
+	else
+		memcpy(dest_buf, ctx->hash, sizeof(ctx->hash));
+
+	free(ctx);
+	return ret;
+}
+
 int caam_hash(const unsigned char *pbuf, unsigned int buf_len,
 	      unsigned char *pout, enum caam_hash_algos algo)
 {
@@ -48,7 +167,7 @@
 	desc = malloc(sizeof(int) * MAX_CAAM_DESCSIZE);
 	if (!desc) {
 		debug("Not enough memory for descriptor allocation\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout,
@@ -75,3 +194,20 @@
 	if (caam_hash(pbuf, buf_len, pout, SHA1))
 		printf("CAAM was not setup properly or it is faulty\n");
 }
+
+int hw_sha_init(struct hash_algo *algo, void **ctxp)
+{
+	return caam_hash_init(ctxp, get_hash_type(algo));
+}
+
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+			    unsigned int size, int is_last)
+{
+	return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo));
+}
+
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+		     int size)
+{
+	return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo));
+}
diff --git a/drivers/crypto/fsl/fsl_hash.h b/drivers/crypto/fsl/fsl_hash.h
new file mode 100644
index 0000000..f5be651
--- /dev/null
+++ b/drivers/crypto/fsl/fsl_hash.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
+ */
+
+#ifndef _SHA_H
+#define _SHA_H
+
+#include <fsl_sec.h>
+#include <hash.h>
+#include "jr.h"
+
+/* We support at most 32 Scatter/Gather Entries. */
+#define MAX_SG_32	32
+
+/*
+ * Hash context contains the following fields
+ * @sha_desc: Sha Descriptor
+ * @sg_num: number of entries in sg table
+ * @len: total length of buffer
+ * @sg_tbl: sg entry table
+ * @hash: index to the hash calculated
+ */
+struct sha_ctx {
+	uint32_t sha_desc[64];
+	uint32_t sg_num;
+	uint32_t len;
+	struct sg_entry sg_tbl[MAX_SG_32];
+	u8 hash[HASH_MAX_DIGEST_SIZE];
+};
+
+#endif
diff --git a/include/fsl_sec.h b/include/fsl_sec.h
index aa850a3..b6e6f04 100644
--- a/include/fsl_sec.h
+++ b/include/fsl_sec.h
@@ -175,6 +175,32 @@
 	u32 jrcr;
 };
 
+/*
+ * Scatter Gather Entry - Specifies the Scatter/Gather format
+ * related information
+ */
+struct sg_entry {
+#ifdef CONFIG_SYS_FSL_SEC_LE
+	uint32_t addr_lo;	/* Memory Address - lo */
+	uint16_t addr_hi;	/* Memory Address of start of buffer - hi */
+	uint16_t reserved_zero;
+#else
+	uint16_t reserved_zero;
+	uint16_t addr_hi;	/* Memory Address of start of buffer - hi */
+	uint32_t addr_lo;	/* Memory Address - lo */
+#endif
+
+	uint32_t len_flag;	/* Length of the data in the frame */
+#define SG_ENTRY_LENGTH_MASK	0x3FFFFFFF
+#define SG_ENTRY_EXTENSION_BIT	0x80000000
+#define SG_ENTRY_FINAL_BIT	0x40000000
+	uint32_t bpid_offset;
+#define SG_ENTRY_BPID_MASK	0x00FF0000
+#define SG_ENTRY_BPID_SHIFT	16
+#define SG_ENTRY_OFFSET_MASK	0x00001FFF
+#define SG_ENTRY_OFFSET_SHIFT	0
+};
+
 int sec_init(void);
 #endif
 
diff --git a/include/hw_sha.h b/include/hw_sha.h
index 783350d..ab19a99 100644
--- a/include/hw_sha.h
+++ b/include/hw_sha.h
@@ -7,7 +7,7 @@
  */
 #ifndef __HW_SHA_H
 #define __HW_SHA_H
-
+#include <hash.h>
 
 /**
  * Computes hash value of input pbuf using h/w acceleration
@@ -34,4 +34,43 @@
  */
 void hw_sha1(const uchar * in_addr, uint buflen,
 			uchar * out_addr, uint chunk_size);
+
+/*
+ * Create the context for sha progressive hashing using h/w acceleration
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_init(struct hash_algo *algo, void **ctxp);
+
+/*
+ * Update buffer for sha progressive hashing using h/w acceleration
+ *
+ * The context is freed by this function if an error occurs.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+		     unsigned int size, int is_last);
+
+/*
+ * Copy the SHA hash result to the destination location
+ *
+ * The context is freed after completion of hash operation or after an error.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where hash is to be copied
+ * @size: Size of the destination buffer
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+		     int size);
+
 #endif
diff --git a/lib/Kconfig b/lib/Kconfig
index a1f30a2..c9d2767 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -29,4 +29,40 @@
 
 source lib/rsa/Kconfig
 
+menu "Hashing Support"
+
+config SHA1
+	bool "Enable SHA1 support"
+	help
+	  This option enables support for hashing using the SHA1 algorithm.
+	  The hash is calculated in software.
+	  The SHA1 algorithm produces a 160-bit (20-byte) hash value
+	  (digest).
+
+config SHA256
+	bool "Enable SHA256 support"
+	help
+	  This option enables support for hashing using the SHA256 algorithm.
+	  The hash is calculated in software.
+	  The SHA256 algorithm produces a 256-bit (32-byte) hash value
+	  (digest).
+
+config SHA_HW_ACCEL
+	bool "Enable hashing using hardware"
+	help
+	  This option enables hardware acceleration
+	  for SHA1/SHA256 hashing.
+	  This affects the 'hash' command and also the
+	  hash_lookup_algo() function.
+
+config SHA_PROG_HW_ACCEL
+	bool "Enable Progressive hashing support using hardware"
+	depends on SHA_HW_ACCEL
+	help
+	  This option enables hardware acceleration for
+	  SHA1/SHA256 progressive hashing.
+	  Data can be streamed in a block at a time and the hashing
+	  is performed in hardware.
+endmenu
+
 endmenu