// SPDX-License-Identifier: GPL-2.0-only
/*
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"

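/*
 * Per-tfm setup: hook this transform to the NX coprocessor's SHA unit,
 * select the SHA-256 property set, and program the digest size into the
 * coprocessor parameter block (CPB).
 */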
static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = nx_crypto_ctx_sha_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	return 0;
}

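/*
 * Software-side descriptor init. The state words are kept big-endian so
 * they can be memcpy'd directly to and from the big-endian digest field
 * in the NX CPB.
 */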
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof(*sctx));

	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);
	sctx->count = 0;

	return 0;
}

static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *out_sg;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state,
	       SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, max_sg_len);
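	/*
	 * Head-minus-end is deliberate: a negative length tells the
	 * hypervisor that op.out points at an nx_sg scatter/gather list
	 * rather than a linear buffer.
	 */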
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (data_len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	do {
		int used_sgs = 0;
		struct nx_sg *in_sg = nx_ctx->in_sg;

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
			used_sgs = in_sg - nx_ctx->in_sg;
		}

		/* to_process: SHA256_BLOCK_SIZE aligned chunk to be
		 * processed in this iteration. This value is restricted
		 * by sg list limits and number of sgs we already used
		 * for leftover data. (see above)
		 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
		 * but because data may not be aligned, we need to account
		 * for that too. */
		to_process = min_t(u64, total,
			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

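		/* as with op.outlen above, the negative op.inlen marks
		 * in_sg as a scatter/gather list */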
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = data_len + buf_len;
		leftover = total - to_process;

		/*
		 * we've hit the nx chip previously and we're updating
		 * again, so copy over the partial digest.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest,
	       SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state,
		       SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

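/*
 * export/import copy the raw sha256_state, so a hash in progress can be
 * saved and resumed through the generic shash API.
 */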
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha256_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
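
For context, here is a minimal sketch of how a kernel caller might exercise
this transform through the generic shash API once the driver is registered.
The helper name nx_sha256_demo is hypothetical; crypto_alloc_shash() resolves
"sha256" to whichever registered implementation has the highest cra_priority,
which may or may not be this driver even on NX-capable hardware.

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>

/* hypothetical helper: one-shot SHA-256 of a linear buffer */
static int nx_sha256_demo(const u8 *data, unsigned int len,
			  u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int rc;

	/* ask the crypto API for a "sha256" shash transform */
	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* stack-allocated descriptor sized for this tfm */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* digest() runs init/update/final in a single call */
		rc = crypto_shash_digest(desc, data, len, digest);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return rc;
}

When this driver wins the "sha256" selection, crypto_shash_digest() ends up
driving the nx_sha256_init/nx_sha256_update/nx_sha256_final callbacks above.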