// SPDX-License-Identifier: GPL-2.0-only
/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon crypto/sha256_generic.c
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

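/*
 * Assembly helper: runs the sparc64 SHA256 crypto opcode over 'rounds'
 * consecutive 64-byte blocks, updating the digest state in place.
 */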
asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data,
					 unsigned int rounds);

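/*
 * Slow path: top up and flush any partially-filled block left from a
 * previous call, push all remaining whole blocks through the opcode
 * transform, then buffer whatever is left for the next update/final.
 */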
static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
				    unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	sctx->count += len;
	if (partial) {
		done = SHA256_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha256_sparc64_transform(sctx->state, sctx->buf, 1);
	}
	if (len - done >= SHA256_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;

		sha256_sparc64_transform(sctx->state, data + done, rounds);
		done += rounds * SHA256_BLOCK_SIZE;
	}

	memcpy(sctx->buf, data + done, len - done);
}

static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	/* Handle the fast case right here */
	if (partial + len < SHA256_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buf + partial, data, len);
	} else
		__sha256_sparc64_update(sctx, data, len, partial);

	return 0;
}

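/*
 * Standard MD-style finalization: append a 0x80 byte, zero-pad to
 * 56 mod 64, append the 64-bit big-endian bit count, then emit the
 * state words as the big-endian digest.
 */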
static int sha256_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA256_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56) - index);

	/* We need to fill a whole block for __sha256_sparc64_update() */
	if (padlen <= 56) {
		sctx->count += padlen;
		memcpy(sctx->buf + index, padding, padlen);
	} else {
		__sha256_sparc64_update(sctx, padding, padlen, index);
	}
	__sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

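/*
 * SHA-224 reuses the SHA-256 compression function; only the initial
 * state (set up by sha224_base_init) and the truncated output differ.
 */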
static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[SHA256_DIGEST_SIZE];

	sha256_sparc64_final(desc, D);

	memcpy(hash, D, SHA224_DIGEST_SIZE);
	memzero_explicit(D, SHA256_DIGEST_SIZE);

	return 0;
}

static int sha256_sparc64_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

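/*
 * SPARC_CR_OPCODE_PRIORITY is higher than the generic C implementation's
 * priority, so the crypto API prefers these drivers once they register.
 */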
static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	sha256_base_init,
	.update		=	sha256_sparc64_update,
	.final		=	sha256_sparc64_final,
	.export		=	sha256_sparc64_export,
	.import		=	sha256_sparc64_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha256",
		.cra_driver_name=	"sha256-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	SHA256_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static struct shash_alg sha224_alg = {
	.digestsize	=	SHA224_DIGEST_SIZE,
	.init		=	sha224_base_init,
	.update		=	sha256_sparc64_update,
	.final		=	sha224_sparc64_final,
	.descsize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha224",
		.cra_driver_name=	"sha224-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_blocksize	=	SHA224_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static bool __init sparc64_has_sha256_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

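	/* The CFR (%asr26) reports which crypto opcodes this CPU provides. */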
	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA256))
		return false;

	return true;
}

static int __init sha256_sparc64_mod_init(void)
{
	if (sparc64_has_sha256_opcode()) {
		int ret = crypto_register_shash(&sha224_alg);
		if (ret < 0)
			return ret;

		ret = crypto_register_shash(&sha256_alg);
		if (ret < 0) {
			crypto_unregister_shash(&sha224_alg);
			return ret;
		}

		pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
		return 0;
	}
	pr_info("sparc64 sha256 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha256_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&sha224_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(sha256_sparc64_mod_init);
module_exit(sha256_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");

MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha256");

#include "crop_devid.c"
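
/*
 * Example (hypothetical, not part of this file): a minimal sketch of how a
 * kernel caller could hash a buffer through the synchronous hash API.  The
 * "sha256" lookup resolves to the highest-priority registered driver, so on
 * sparc64 with the crypto opcodes available it lands on sha256-sparc64.
 * The function name below is illustrative only.
 */
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

static int example_sha256_digest(const u8 *data, unsigned int len,
				 u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	/* Ask the crypto API for the best available "sha256" implementation. */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* One-shot init + update + final over the whole buffer. */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}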